// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: hdfs.proto

package org.apache.hadoop.hdfs.protocol.proto;

public final class HdfsProtos {
  private HdfsProtos() {}
  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
  }

  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
    registerAllExtensions(
        (org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
  }
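  // Editor's illustrative sketch, not part of the protoc output: a typical
  // caller creates an ExtensionRegistry and registers this file's extensions
  // before parsing. The registerAllExtensions overloads above have empty
  // bodies, so the call is a no-op here, but the pattern is the same for
  // generated files that do declare extensions.
  private static org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry
      exampleRegistry() {
    org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry =
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry.newInstance();
    registerAllExtensions(registry);
    return registry;
  }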
  /**
   * 
   **
   * Types of recognized storage media.
   * 
* * Protobuf enum {@code hadoop.hdfs.StorageTypeProto} */ public enum StorageTypeProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * DISK = 1; */ DISK(1), /** * SSD = 2; */ SSD(2), /** * ARCHIVE = 3; */ ARCHIVE(3), /** * RAM_DISK = 4; */ RAM_DISK(4), /** * PROVIDED = 5; */ PROVIDED(5), /** * NVDIMM = 6; */ NVDIMM(6), ; /** * DISK = 1; */ public static final int DISK_VALUE = 1; /** * SSD = 2; */ public static final int SSD_VALUE = 2; /** * ARCHIVE = 3; */ public static final int ARCHIVE_VALUE = 3; /** * RAM_DISK = 4; */ public static final int RAM_DISK_VALUE = 4; /** * PROVIDED = 5; */ public static final int PROVIDED_VALUE = 5; /** * NVDIMM = 6; */ public static final int NVDIMM_VALUE = 6; public final int getNumber() { return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static StorageTypeProto valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. */ public static StorageTypeProto forNumber(int value) { switch (value) { case 1: return DISK; case 2: return SSD; case 3: return ARCHIVE; case 4: return RAM_DISK; case 5: return PROVIDED; case 6: return NVDIMM; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< StorageTypeProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public StorageTypeProto findValueByNumber(int number) { return StorageTypeProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(0); } private static final StorageTypeProto[] VALUES = values(); public static StorageTypeProto valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private StorageTypeProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.StorageTypeProto) } /** *
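   */
  // Editor's illustrative sketch, not part of the protoc output: converting
  // between the numeric wire value and the StorageTypeProto constant.
  // forNumber returns null for values this build does not know about; the
  // DISK fallback below is an assumption for illustration only.
  private static StorageTypeProto exampleStorageTypeFromWire(int wireValue) {
    StorageTypeProto type = StorageTypeProto.forNumber(wireValue);
    return type != null ? type : StorageTypeProto.DISK;
  }
  /**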
   **
   * Types of recognized blocks.
   * 
* * Protobuf enum {@code hadoop.hdfs.BlockTypeProto} */ public enum BlockTypeProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * CONTIGUOUS = 0; */ CONTIGUOUS(0), /** * STRIPED = 1; */ STRIPED(1), ; /** * CONTIGUOUS = 0; */ public static final int CONTIGUOUS_VALUE = 0; /** * STRIPED = 1; */ public static final int STRIPED_VALUE = 1; public final int getNumber() { return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static BlockTypeProto valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. */ public static BlockTypeProto forNumber(int value) { switch (value) { case 0: return CONTIGUOUS; case 1: return STRIPED; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< BlockTypeProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public BlockTypeProto findValueByNumber(int number) { return BlockTypeProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(1); } private static final BlockTypeProto[] VALUES = values(); public static BlockTypeProto valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private BlockTypeProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.BlockTypeProto) } /** *
   **
   * Cipher suite.
   * 
* * Protobuf enum {@code hadoop.hdfs.CipherSuiteProto} */ public enum CipherSuiteProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * UNKNOWN = 1; */ UNKNOWN(1), /** * AES_CTR_NOPADDING = 2; */ AES_CTR_NOPADDING(2), /** * SM4_CTR_NOPADDING = 3; */ SM4_CTR_NOPADDING(3), ; /** * UNKNOWN = 1; */ public static final int UNKNOWN_VALUE = 1; /** * AES_CTR_NOPADDING = 2; */ public static final int AES_CTR_NOPADDING_VALUE = 2; /** * SM4_CTR_NOPADDING = 3; */ public static final int SM4_CTR_NOPADDING_VALUE = 3; public final int getNumber() { return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static CipherSuiteProto valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. */ public static CipherSuiteProto forNumber(int value) { switch (value) { case 1: return UNKNOWN; case 2: return AES_CTR_NOPADDING; case 3: return SM4_CTR_NOPADDING; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< CipherSuiteProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public CipherSuiteProto findValueByNumber(int number) { return CipherSuiteProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(2); } private static final CipherSuiteProto[] VALUES = values(); public static CipherSuiteProto valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private CipherSuiteProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.CipherSuiteProto) } /** *
   **
   * Crypto protocol version used to access encrypted files.
   * 
* * Protobuf enum {@code hadoop.hdfs.CryptoProtocolVersionProto} */ public enum CryptoProtocolVersionProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * UNKNOWN_PROTOCOL_VERSION = 1; */ UNKNOWN_PROTOCOL_VERSION(1), /** * ENCRYPTION_ZONES = 2; */ ENCRYPTION_ZONES(2), ; /** * UNKNOWN_PROTOCOL_VERSION = 1; */ public static final int UNKNOWN_PROTOCOL_VERSION_VALUE = 1; /** * ENCRYPTION_ZONES = 2; */ public static final int ENCRYPTION_ZONES_VALUE = 2; public final int getNumber() { return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static CryptoProtocolVersionProto valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. */ public static CryptoProtocolVersionProto forNumber(int value) { switch (value) { case 1: return UNKNOWN_PROTOCOL_VERSION; case 2: return ENCRYPTION_ZONES; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< CryptoProtocolVersionProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public CryptoProtocolVersionProto findValueByNumber(int number) { return CryptoProtocolVersionProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(3); } private static final CryptoProtocolVersionProto[] VALUES = values(); public static CryptoProtocolVersionProto valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private CryptoProtocolVersionProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.CryptoProtocolVersionProto) } /** *
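   */
  // Editor's illustrative sketch, not part of the protoc output: a hypothetical
  // helper that treats ENCRYPTION_ZONES as the only crypto protocol version
  // this caller understands; real clients may accept a wider set.
  private static boolean exampleSupportsCryptoVersion(CryptoProtocolVersionProto v) {
    return v == CryptoProtocolVersionProto.ENCRYPTION_ZONES;
  }
  /**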
   **
   * EC policy state.
   * 
* * Protobuf enum {@code hadoop.hdfs.ErasureCodingPolicyState} */ public enum ErasureCodingPolicyState implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * DISABLED = 1; */ DISABLED(1), /** * ENABLED = 2; */ ENABLED(2), /** * REMOVED = 3; */ REMOVED(3), ; /** * DISABLED = 1; */ public static final int DISABLED_VALUE = 1; /** * ENABLED = 2; */ public static final int ENABLED_VALUE = 2; /** * REMOVED = 3; */ public static final int REMOVED_VALUE = 3; public final int getNumber() { return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static ErasureCodingPolicyState valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. */ public static ErasureCodingPolicyState forNumber(int value) { switch (value) { case 1: return DISABLED; case 2: return ENABLED; case 3: return REMOVED; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< ErasureCodingPolicyState> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public ErasureCodingPolicyState findValueByNumber(int number) { return ErasureCodingPolicyState.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(4); } private static final ErasureCodingPolicyState[] VALUES = values(); public static ErasureCodingPolicyState valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private ErasureCodingPolicyState(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.ErasureCodingPolicyState) } /** *
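   */
  // Editor's illustrative sketch, not part of the protoc output: a hypothetical
  // predicate that treats only ENABLED erasure coding policies as usable and
  // DISABLED or REMOVED ones as unavailable.
  private static boolean examplePolicyUsable(ErasureCodingPolicyState state) {
    return state == ErasureCodingPolicyState.ENABLED;
  }
  /**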
   **
   * Checksum algorithms/types used in HDFS
   * Make sure this enum's integer values match enum values' id properties defined
   * in org.apache.hadoop.util.DataChecksum.Type
   * 
* * Protobuf enum {@code hadoop.hdfs.ChecksumTypeProto} */ public enum ChecksumTypeProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * CHECKSUM_NULL = 0; */ CHECKSUM_NULL(0), /** * CHECKSUM_CRC32 = 1; */ CHECKSUM_CRC32(1), /** * CHECKSUM_CRC32C = 2; */ CHECKSUM_CRC32C(2), ; /** * CHECKSUM_NULL = 0; */ public static final int CHECKSUM_NULL_VALUE = 0; /** * CHECKSUM_CRC32 = 1; */ public static final int CHECKSUM_CRC32_VALUE = 1; /** * CHECKSUM_CRC32C = 2; */ public static final int CHECKSUM_CRC32C_VALUE = 2; public final int getNumber() { return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static ChecksumTypeProto valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. */ public static ChecksumTypeProto forNumber(int value) { switch (value) { case 0: return CHECKSUM_NULL; case 1: return CHECKSUM_CRC32; case 2: return CHECKSUM_CRC32C; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< ChecksumTypeProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public ChecksumTypeProto findValueByNumber(int number) { return ChecksumTypeProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(5); } private static final ChecksumTypeProto[] VALUES = values(); public static ChecksumTypeProto valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private ChecksumTypeProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.ChecksumTypeProto) } /** * Protobuf enum {@code hadoop.hdfs.BlockChecksumTypeProto} */ public enum BlockChecksumTypeProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** *
     * BlockChecksum obtained by taking the MD5 digest of chunk CRCs
     * 
* * MD5CRC = 1; */ MD5CRC(1), /** *
     * Chunk-independent CRC, optionally striped
     * 
* * COMPOSITE_CRC = 2; */ COMPOSITE_CRC(2), ; /** *
     * BlockChecksum obtained by taking the MD5 digest of chunk CRCs
     * 
* * MD5CRC = 1; */ public static final int MD5CRC_VALUE = 1; /** *
     * Chunk-independent CRC, optionally striped
     * 
* * COMPOSITE_CRC = 2; */ public static final int COMPOSITE_CRC_VALUE = 2; public final int getNumber() { return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static BlockChecksumTypeProto valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. */ public static BlockChecksumTypeProto forNumber(int value) { switch (value) { case 1: return MD5CRC; case 2: return COMPOSITE_CRC; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< BlockChecksumTypeProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public BlockChecksumTypeProto findValueByNumber(int number) { return BlockChecksumTypeProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(6); } private static final BlockChecksumTypeProto[] VALUES = values(); public static BlockChecksumTypeProto valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private BlockChecksumTypeProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.BlockChecksumTypeProto) } /** *
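   */
  // Editor's illustrative sketch, not part of the protoc output: the generated
  // enums map to and from their wire ids via getNumber()/forNumber(). The
  // comment on ChecksumTypeProto above says its ids are meant to match
  // org.apache.hadoop.util.DataChecksum.Type; that is restated here, not
  // verified. The CRC32C fallback for unknown ids is an assumption for
  // illustration only.
  private static ChecksumTypeProto exampleChecksumFromId(int id) {
    ChecksumTypeProto type = ChecksumTypeProto.forNumber(id);
    return type != null ? type : ChecksumTypeProto.CHECKSUM_CRC32C;
  }
  /**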
   **
   * File access permissions mode.
   * 
* * Protobuf enum {@code hadoop.hdfs.AccessModeProto} */ public enum AccessModeProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * READ = 1; */ READ(1), /** * WRITE = 2; */ WRITE(2), /** * COPY = 3; */ COPY(3), /** * REPLACE = 4; */ REPLACE(4), ; /** * READ = 1; */ public static final int READ_VALUE = 1; /** * WRITE = 2; */ public static final int WRITE_VALUE = 2; /** * COPY = 3; */ public static final int COPY_VALUE = 3; /** * REPLACE = 4; */ public static final int REPLACE_VALUE = 4; public final int getNumber() { return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static AccessModeProto valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. */ public static AccessModeProto forNumber(int value) { switch (value) { case 1: return READ; case 2: return WRITE; case 3: return COPY; case 4: return REPLACE; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< AccessModeProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public AccessModeProto findValueByNumber(int number) { return AccessModeProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(7); } private static final AccessModeProto[] VALUES = values(); public static AccessModeProto valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private AccessModeProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.AccessModeProto) } public interface ExtendedBlockProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ExtendedBlockProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
     * Block pool id - globally unique across clusters
     * 
* * required string poolId = 1; * @return Whether the poolId field is set. */ boolean hasPoolId(); /** *
     * Block pool id - globally unique across clusters
     * 
* * required string poolId = 1; * @return The poolId. */ java.lang.String getPoolId(); /** *
     * Block pool id - globally unique across clusters
     * 
* * required string poolId = 1; * @return The bytes for poolId. */ org.apache.hadoop.thirdparty.protobuf.ByteString getPoolIdBytes(); /** *
     * the local id within a pool
     * 
* * required uint64 blockId = 2; * @return Whether the blockId field is set. */ boolean hasBlockId(); /** *
     * the local id within a pool
     * 
* * required uint64 blockId = 2; * @return The blockId. */ long getBlockId(); /** * required uint64 generationStamp = 3; * @return Whether the generationStamp field is set. */ boolean hasGenerationStamp(); /** * required uint64 generationStamp = 3; * @return The generationStamp. */ long getGenerationStamp(); /** *
     * len does not belong in ebid 
     * 
* * optional uint64 numBytes = 4 [default = 0]; * @return Whether the numBytes field is set. */ boolean hasNumBytes(); /** *
     * len does not belong in ebid 
     * 
* * optional uint64 numBytes = 4 [default = 0]; * @return The numBytes. */ long getNumBytes(); } /** *
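   */
  // Editor's illustrative sketch, not part of the protoc output: reading an
  // optional field through the generated has/get pair. numBytes defaults to 0
  // when the field was never set, so the explicit check is mostly for clarity.
  private static long exampleNumBytesOrDefault(ExtendedBlockProtoOrBuilder block) {
    return block.hasNumBytes() ? block.getNumBytes() : 0L;
  }
  /**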
   **
   * Extended block identifies a block
   * 
* * Protobuf type {@code hadoop.hdfs.ExtendedBlockProto} */ public static final class ExtendedBlockProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ExtendedBlockProto) ExtendedBlockProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ExtendedBlockProto.newBuilder() to construct. private ExtendedBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ExtendedBlockProto() { poolId_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new ExtendedBlockProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class); } private int bitField0_; public static final int POOLID_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object poolId_ = ""; /** *
     * Block pool id - globally unique across clusters
     * 
* * required string poolId = 1; * @return Whether the poolId field is set. */ @java.lang.Override public boolean hasPoolId() { return ((bitField0_ & 0x00000001) != 0); } /** *
     * Block pool id - globally unique across clusters
     * 
* * required string poolId = 1; * @return The poolId. */ @java.lang.Override public java.lang.String getPoolId() { java.lang.Object ref = poolId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { poolId_ = s; } return s; } } /** *
     * Block pool id - globally unique across clusters
     * 
* * required string poolId = 1; * @return The bytes for poolId. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getPoolIdBytes() { java.lang.Object ref = poolId_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); poolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int BLOCKID_FIELD_NUMBER = 2; private long blockId_ = 0L; /** *
     * the local id within a pool
     * 
* * required uint64 blockId = 2; * @return Whether the blockId field is set. */ @java.lang.Override public boolean hasBlockId() { return ((bitField0_ & 0x00000002) != 0); } /** *
     * the local id within a pool
     * 
* * required uint64 blockId = 2; * @return The blockId. */ @java.lang.Override public long getBlockId() { return blockId_; } public static final int GENERATIONSTAMP_FIELD_NUMBER = 3; private long generationStamp_ = 0L; /** * required uint64 generationStamp = 3; * @return Whether the generationStamp field is set. */ @java.lang.Override public boolean hasGenerationStamp() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 generationStamp = 3; * @return The generationStamp. */ @java.lang.Override public long getGenerationStamp() { return generationStamp_; } public static final int NUMBYTES_FIELD_NUMBER = 4; private long numBytes_ = 0L; /** *
     * len does not belong in ebid 
     * 
* * optional uint64 numBytes = 4 [default = 0]; * @return Whether the numBytes field is set. */ @java.lang.Override public boolean hasNumBytes() { return ((bitField0_ & 0x00000008) != 0); } /** *
     * len does not belong in ebid 
     * 
* * optional uint64 numBytes = 4 [default = 0]; * @return The numBytes. */ @java.lang.Override public long getNumBytes() { return numBytes_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasPoolId()) { memoizedIsInitialized = 0; return false; } if (!hasBlockId()) { memoizedIsInitialized = 0; return false; } if (!hasGenerationStamp()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, poolId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, blockId_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, generationStamp_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, numBytes_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, poolId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, blockId_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, generationStamp_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, numBytes_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) obj; if (hasPoolId() != other.hasPoolId()) return false; if (hasPoolId()) { if (!getPoolId() .equals(other.getPoolId())) return false; } if (hasBlockId() != other.hasBlockId()) return false; if (hasBlockId()) { if (getBlockId() != other.getBlockId()) return false; } if (hasGenerationStamp() != other.hasGenerationStamp()) return false; if (hasGenerationStamp()) { if (getGenerationStamp() != other.getGenerationStamp()) return false; } if (hasNumBytes() != other.hasNumBytes()) return false; if (hasNumBytes()) { if (getNumBytes() != other.getNumBytes()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasPoolId()) { hash = (37 * hash) + POOLID_FIELD_NUMBER; hash = (53 * hash) + getPoolId().hashCode(); } if (hasBlockId()) { hash = (37 * hash) + BLOCKID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getBlockId()); } if (hasGenerationStamp()) { hash = (37 * hash) + GENERATIONSTAMP_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getGenerationStamp()); } if 
(hasNumBytes()) { hash = (37 * hash) + NUMBYTES_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNumBytes()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
      * Extended block identifies a block
     * 
* * Protobuf type {@code hadoop.hdfs.ExtendedBlockProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ExtendedBlockProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; poolId_ = ""; blockId_ = 0L; generationStamp_ = 0L; numBytes_ = 0L; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.poolId_ = poolId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.blockId_ = blockId_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.generationStamp_ = generationStamp_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.numBytes_ = numBytes_; to_bitField0_ |= 0x00000008; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } 
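      // Editor's illustrative sketch, not part of the protoc output: the usual
      // builder round trip for this message. The pool id and numeric values
      // below are made-up placeholders, not values taken from any cluster.
      private static ExtendedBlockProto exampleBuild() {
        return ExtendedBlockProto.newBuilder()
            .setPoolId("BP-example-pool")
            .setBlockId(1073741825L)
            .setGenerationStamp(1001L)
            .setNumBytes(134217728L)
            .build();
      }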
@java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) return this; if (other.hasPoolId()) { poolId_ = other.poolId_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasBlockId()) { setBlockId(other.getBlockId()); } if (other.hasGenerationStamp()) { setGenerationStamp(other.getGenerationStamp()); } if (other.hasNumBytes()) { setNumBytes(other.getNumBytes()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasPoolId()) { return false; } if (!hasBlockId()) { return false; } if (!hasGenerationStamp()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { poolId_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 16: { blockId_ = input.readUInt64(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { generationStamp_ = input.readUInt64(); bitField0_ |= 0x00000004; break; } // case 24 case 32: { numBytes_ = input.readUInt64(); bitField0_ |= 0x00000008; break; } // case 32 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object poolId_ = ""; /** *
       * Block pool id - globally unique across clusters
       * 
* * required string poolId = 1; * @return Whether the poolId field is set. */ public boolean hasPoolId() { return ((bitField0_ & 0x00000001) != 0); } /** *
       * Block pool id - globally unique across clusters
       * 
* * required string poolId = 1; * @return The poolId. */ public java.lang.String getPoolId() { java.lang.Object ref = poolId_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { poolId_ = s; } return s; } else { return (java.lang.String) ref; } } /** *
       * Block pool id - globally unique across clusters
       * 
* * required string poolId = 1; * @return The bytes for poolId. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPoolIdBytes() { java.lang.Object ref = poolId_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); poolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** *
       * Block pool id - globally unique across clusters
       * 
* * required string poolId = 1; * @param value The poolId to set. * @return This builder for chaining. */ public Builder setPoolId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } poolId_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** *
       * Block pool id - globally unique across clusters
       * 
* * required string poolId = 1; * @return This builder for chaining. */ public Builder clearPoolId() { poolId_ = getDefaultInstance().getPoolId(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** *
       * Block pool id - globally unique across clusters
       * 
* * required string poolId = 1; * @param value The bytes for poolId to set. * @return This builder for chaining. */ public Builder setPoolIdBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } poolId_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private long blockId_ ; /** *
       * the local id within a pool
       * 
* * required uint64 blockId = 2; * @return Whether the blockId field is set. */ @java.lang.Override public boolean hasBlockId() { return ((bitField0_ & 0x00000002) != 0); } /** *
       * the local id within a pool
       * 
* * required uint64 blockId = 2; * @return The blockId. */ @java.lang.Override public long getBlockId() { return blockId_; } /** *
       * the local id within a pool
       * 
* * required uint64 blockId = 2; * @param value The blockId to set. * @return This builder for chaining. */ public Builder setBlockId(long value) { blockId_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** *
       * the local id within a pool
       * 
* * required uint64 blockId = 2; * @return This builder for chaining. */ public Builder clearBlockId() { bitField0_ = (bitField0_ & ~0x00000002); blockId_ = 0L; onChanged(); return this; } private long generationStamp_ ; /** * required uint64 generationStamp = 3; * @return Whether the generationStamp field is set. */ @java.lang.Override public boolean hasGenerationStamp() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 generationStamp = 3; * @return The generationStamp. */ @java.lang.Override public long getGenerationStamp() { return generationStamp_; } /** * required uint64 generationStamp = 3; * @param value The generationStamp to set. * @return This builder for chaining. */ public Builder setGenerationStamp(long value) { generationStamp_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required uint64 generationStamp = 3; * @return This builder for chaining. */ public Builder clearGenerationStamp() { bitField0_ = (bitField0_ & ~0x00000004); generationStamp_ = 0L; onChanged(); return this; } private long numBytes_ ; /** *
       * len does not belong in ebid 
       * 
* * optional uint64 numBytes = 4 [default = 0]; * @return Whether the numBytes field is set. */ @java.lang.Override public boolean hasNumBytes() { return ((bitField0_ & 0x00000008) != 0); } /** *
       * len does not belong in ebid 
       * 
* * optional uint64 numBytes = 4 [default = 0]; * @return The numBytes. */ @java.lang.Override public long getNumBytes() { return numBytes_; } /** *
       * len does not belong in ebid 
       * 
* * optional uint64 numBytes = 4 [default = 0]; * @param value The numBytes to set. * @return This builder for chaining. */ public Builder setNumBytes(long value) { numBytes_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** *
       * len does not belong in ebid 
       * 
* * optional uint64 numBytes = 4 [default = 0]; * @return This builder for chaining. */ public Builder clearNumBytes() { bitField0_ = (bitField0_ & ~0x00000008); numBytes_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ExtendedBlockProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ExtendedBlockProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ExtendedBlockProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ProvidedStorageLocationProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ProvidedStorageLocationProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string path = 1; * @return Whether the path field is set. */ boolean hasPath(); /** * required string path = 1; * @return The path. */ java.lang.String getPath(); /** * required string path = 1; * @return The bytes for path. */ org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes(); /** * required int64 offset = 2; * @return Whether the offset field is set. */ boolean hasOffset(); /** * required int64 offset = 2; * @return The offset. */ long getOffset(); /** * required int64 length = 3; * @return Whether the length field is set. */ boolean hasLength(); /** * required int64 length = 3; * @return The length. */ long getLength(); /** * required bytes nonce = 4; * @return Whether the nonce field is set. */ boolean hasNonce(); /** * required bytes nonce = 4; * @return The nonce. 
*/ org.apache.hadoop.thirdparty.protobuf.ByteString getNonce(); } /** * Protobuf type {@code hadoop.hdfs.ProvidedStorageLocationProto} */ public static final class ProvidedStorageLocationProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ProvidedStorageLocationProto) ProvidedStorageLocationProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ProvidedStorageLocationProto.newBuilder() to construct. private ProvidedStorageLocationProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ProvidedStorageLocationProto() { path_ = ""; nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new ProvidedStorageLocationProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder.class); } private int bitField0_; public static final int PATH_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object path_ = ""; /** * required string path = 1; * @return Whether the path field is set. */ @java.lang.Override public boolean hasPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required string path = 1; * @return The path. */ @java.lang.Override public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * required string path = 1; * @return The bytes for path. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int OFFSET_FIELD_NUMBER = 2; private long offset_ = 0L; /** * required int64 offset = 2; * @return Whether the offset field is set. */ @java.lang.Override public boolean hasOffset() { return ((bitField0_ & 0x00000002) != 0); } /** * required int64 offset = 2; * @return The offset. */ @java.lang.Override public long getOffset() { return offset_; } public static final int LENGTH_FIELD_NUMBER = 3; private long length_ = 0L; /** * required int64 length = 3; * @return Whether the length field is set. 
*/ @java.lang.Override public boolean hasLength() { return ((bitField0_ & 0x00000004) != 0); } /** * required int64 length = 3; * @return The length. */ @java.lang.Override public long getLength() { return length_; } public static final int NONCE_FIELD_NUMBER = 4; private org.apache.hadoop.thirdparty.protobuf.ByteString nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes nonce = 4; * @return Whether the nonce field is set. */ @java.lang.Override public boolean hasNonce() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes nonce = 4; * @return The nonce. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getNonce() { return nonce_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasPath()) { memoizedIsInitialized = 0; return false; } if (!hasOffset()) { memoizedIsInitialized = 0; return false; } if (!hasLength()) { memoizedIsInitialized = 0; return false; } if (!hasNonce()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, path_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeInt64(2, offset_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeInt64(3, length_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBytes(4, nonce_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, path_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt64Size(2, offset_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt64Size(3, length_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(4, nonce_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto) obj; if (hasPath() != other.hasPath()) return false; if (hasPath()) { if (!getPath() .equals(other.getPath())) return false; } if (hasOffset() != other.hasOffset()) return false; if (hasOffset()) { if (getOffset() != other.getOffset()) return false; } if (hasLength() != other.hasLength()) return false; if (hasLength()) { if (getLength() != other.getLength()) return false; } if (hasNonce() != other.hasNonce()) return false; if (hasNonce()) { if (!getNonce() .equals(other.getNonce())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 
0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } if (hasOffset()) { hash = (37 * hash) + OFFSET_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getOffset()); } if (hasLength()) { hash = (37 * hash) + LENGTH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLength()); } if (hasNonce()) { hash = (37 * hash) + NONCE_FIELD_NUMBER; hash = (53 * hash) + getNonce().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseDelimitedFrom( java.io.InputStream input, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ProvidedStorageLocationProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ProvidedStorageLocationProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; path_ = ""; offset_ = 0L; length_ = 0L; nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getDefaultInstanceForType() { return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.path_ = path_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.offset_ = offset_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.length_ = length_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.nonce_ = nonce_; to_bitField0_ |= 0x00000008; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance()) return this; if (other.hasPath()) { path_ = other.path_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasOffset()) { setOffset(other.getOffset()); } if (other.hasLength()) { setLength(other.getLength()); } if (other.hasNonce()) { setNonce(other.getNonce()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasPath()) { return false; } if (!hasOffset()) { return false; } if (!hasLength()) { return false; } if (!hasNonce()) { return false; } return true; } @java.lang.Override public 
Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { path_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 16: { offset_ = input.readInt64(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { length_ = input.readInt64(); bitField0_ |= 0x00000004; break; } // case 24 case 34: { nonce_ = input.readBytes(); bitField0_ |= 0x00000008; break; } // case 34 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object path_ = ""; /** * required string path = 1; * @return Whether the path field is set. */ public boolean hasPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required string path = 1; * @return The path. */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string path = 1; * @return The bytes for path. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string path = 1; * @param value The path to set. * @return This builder for chaining. */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } path_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required string path = 1; * @return This builder for chaining. */ public Builder clearPath() { path_ = getDefaultInstance().getPath(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * required string path = 1; * @param value The bytes for path to set. * @return This builder for chaining. */ public Builder setPathBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } path_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private long offset_ ; /** * required int64 offset = 2; * @return Whether the offset field is set. */ @java.lang.Override public boolean hasOffset() { return ((bitField0_ & 0x00000002) != 0); } /** * required int64 offset = 2; * @return The offset. */ @java.lang.Override public long getOffset() { return offset_; } /** * required int64 offset = 2; * @param value The offset to set. * @return This builder for chaining. */ public Builder setOffset(long value) { offset_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required int64 offset = 2; * @return This builder for chaining. 
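     *
     * Editorial note (not part of the generated source): a minimal sketch of
     * assembling a ProvidedStorageLocationProto through this builder; the
     * values below are illustrative placeholders only.
     *
     *   ProvidedStorageLocationProto loc = ProvidedStorageLocationProto.newBuilder()
     *       .setPath("/provided/block-000001")          // required string path = 1
     *       .setOffset(0L)                              // required int64 offset = 2
     *       .setLength(134217728L)                      // required int64 length = 3
     *       .setNonce(org.apache.hadoop.thirdparty.protobuf.ByteString
     *           .copyFrom(new byte[] {0x01, 0x02}))     // required bytes nonce = 4
     *       .build();                                   // throws if a required field is unset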
*/ public Builder clearOffset() { bitField0_ = (bitField0_ & ~0x00000002); offset_ = 0L; onChanged(); return this; } private long length_ ; /** * required int64 length = 3; * @return Whether the length field is set. */ @java.lang.Override public boolean hasLength() { return ((bitField0_ & 0x00000004) != 0); } /** * required int64 length = 3; * @return The length. */ @java.lang.Override public long getLength() { return length_; } /** * required int64 length = 3; * @param value The length to set. * @return This builder for chaining. */ public Builder setLength(long value) { length_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required int64 length = 3; * @return This builder for chaining. */ public Builder clearLength() { bitField0_ = (bitField0_ & ~0x00000004); length_ = 0L; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes nonce = 4; * @return Whether the nonce field is set. */ @java.lang.Override public boolean hasNonce() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes nonce = 4; * @return The nonce. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getNonce() { return nonce_; } /** * required bytes nonce = 4; * @param value The nonce to set. * @return This builder for chaining. */ public Builder setNonce(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } nonce_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * required bytes nonce = 4; * @return This builder for chaining. */ public Builder clearNonce() { bitField0_ = (bitField0_ & ~0x00000008); nonce_ = getDefaultInstance().getNonce(); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ProvidedStorageLocationProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ProvidedStorageLocationProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ProvidedStorageLocationProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DatanodeIDProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeIDProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
     * IP address
     * 
* * required string ipAddr = 1; * @return Whether the ipAddr field is set. */ boolean hasIpAddr(); /** *
     * IP address
     * 
* * required string ipAddr = 1; * @return The ipAddr. */ java.lang.String getIpAddr(); /** *
     * IP address
     * 
* * required string ipAddr = 1; * @return The bytes for ipAddr. */ org.apache.hadoop.thirdparty.protobuf.ByteString getIpAddrBytes(); /** *
     * hostname
     * 
* * required string hostName = 2; * @return Whether the hostName field is set. */ boolean hasHostName(); /** *
     * hostname
     * 
* * required string hostName = 2; * @return The hostName. */ java.lang.String getHostName(); /** *
     * hostname
     * 
* * required string hostName = 2; * @return The bytes for hostName. */ org.apache.hadoop.thirdparty.protobuf.ByteString getHostNameBytes(); /** *
     * UUID assigned to the Datanode. For
     * 
* * required string datanodeUuid = 3; * @return Whether the datanodeUuid field is set. */ boolean hasDatanodeUuid(); /** *
     * UUID assigned to the Datanode. For
     * 
* * required string datanodeUuid = 3; * @return The datanodeUuid. */ java.lang.String getDatanodeUuid(); /** *
     * UUID assigned to the Datanode. For
     * 
* * required string datanodeUuid = 3; * @return The bytes for datanodeUuid. */ org.apache.hadoop.thirdparty.protobuf.ByteString getDatanodeUuidBytes(); /** *
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * 
* * required uint32 xferPort = 4; * @return Whether the xferPort field is set. */ boolean hasXferPort(); /** *
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * 
* * required uint32 xferPort = 4; * @return The xferPort. */ int getXferPort(); /** *
     * datanode http port
     * 
* * required uint32 infoPort = 5; * @return Whether the infoPort field is set. */ boolean hasInfoPort(); /** *
     * datanode http port
     * 
* * required uint32 infoPort = 5; * @return The infoPort. */ int getInfoPort(); /** *
     * ipc server port
     * 
* * required uint32 ipcPort = 6; * @return Whether the ipcPort field is set. */ boolean hasIpcPort(); /** *
     * ipc server port
     * 
* * required uint32 ipcPort = 6; * @return The ipcPort. */ int getIpcPort(); /** *
     * datanode https port
     * 
* * optional uint32 infoSecurePort = 7 [default = 0]; * @return Whether the infoSecurePort field is set. */ boolean hasInfoSecurePort(); /** *
     * datanode https port
     * 
* * optional uint32 infoSecurePort = 7 [default = 0]; * @return The infoSecurePort. */ int getInfoSecurePort(); } /** *
   **
   * Identifies a Datanode
   * 
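      * Editorial note (not part of the generated source): a minimal sketch of
      * constructing this message through its builder, using the setters shown
      * further below; all values are illustrative placeholders, not real
      * cluster data.
      *
      *   DatanodeIDProto id = DatanodeIDProto.newBuilder()
      *       .setIpAddr("10.0.0.12")                    // required string ipAddr = 1
      *       .setHostName("dn1.example.com")            // required string hostName = 2
      *       .setDatanodeUuid("11111111-2222-3333-4444-555555555555") // required string datanodeUuid = 3
      *       .setXferPort(9866)                         // required uint32 xferPort = 4
      *       .setInfoPort(9864)                         // required uint32 infoPort = 5
      *       .setIpcPort(9867)                          // required uint32 ipcPort = 6
      *       .setInfoSecurePort(9865)                   // optional uint32 infoSecurePort = 7
      *       .build();                                  // throws if any required field is unset
      *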
* * Protobuf type {@code hadoop.hdfs.DatanodeIDProto} */ public static final class DatanodeIDProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeIDProto) DatanodeIDProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DatanodeIDProto.newBuilder() to construct. private DatanodeIDProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DatanodeIDProto() { ipAddr_ = ""; hostName_ = ""; datanodeUuid_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new DatanodeIDProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class); } private int bitField0_; public static final int IPADDR_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object ipAddr_ = ""; /** *
     * IP address
     * 
* * required string ipAddr = 1; * @return Whether the ipAddr field is set. */ @java.lang.Override public boolean hasIpAddr() { return ((bitField0_ & 0x00000001) != 0); } /** *
     * IP address
     * 
* * required string ipAddr = 1; * @return The ipAddr. */ @java.lang.Override public java.lang.String getIpAddr() { java.lang.Object ref = ipAddr_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ipAddr_ = s; } return s; } } /** *
     * IP address
     * 
* * required string ipAddr = 1; * @return The bytes for ipAddr. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getIpAddrBytes() { java.lang.Object ref = ipAddr_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ipAddr_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int HOSTNAME_FIELD_NUMBER = 2; @SuppressWarnings("serial") private volatile java.lang.Object hostName_ = ""; /** *
     * hostname
     * 
* * required string hostName = 2; * @return Whether the hostName field is set. */ @java.lang.Override public boolean hasHostName() { return ((bitField0_ & 0x00000002) != 0); } /** *
     * hostname
     * 
* * required string hostName = 2; * @return The hostName. */ @java.lang.Override public java.lang.String getHostName() { java.lang.Object ref = hostName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { hostName_ = s; } return s; } } /** *
     * hostname
     * 
* * required string hostName = 2; * @return The bytes for hostName. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getHostNameBytes() { java.lang.Object ref = hostName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); hostName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int DATANODEUUID_FIELD_NUMBER = 3; @SuppressWarnings("serial") private volatile java.lang.Object datanodeUuid_ = ""; /** *
     * UUID assigned to the Datanode. For
     * 
* * required string datanodeUuid = 3; * @return Whether the datanodeUuid field is set. */ @java.lang.Override public boolean hasDatanodeUuid() { return ((bitField0_ & 0x00000004) != 0); } /** *
     * UUID assigned to the Datanode. For
     * 
* * required string datanodeUuid = 3; * @return The datanodeUuid. */ @java.lang.Override public java.lang.String getDatanodeUuid() { java.lang.Object ref = datanodeUuid_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { datanodeUuid_ = s; } return s; } } /** *
     * UUID assigned to the Datanode. For
     * 
* * required string datanodeUuid = 3; * @return The bytes for datanodeUuid. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getDatanodeUuidBytes() { java.lang.Object ref = datanodeUuid_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); datanodeUuid_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int XFERPORT_FIELD_NUMBER = 4; private int xferPort_ = 0; /** *
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * 
* * required uint32 xferPort = 4; * @return Whether the xferPort field is set. */ @java.lang.Override public boolean hasXferPort() { return ((bitField0_ & 0x00000008) != 0); } /** *
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * 
* * required uint32 xferPort = 4; * @return The xferPort. */ @java.lang.Override public int getXferPort() { return xferPort_; } public static final int INFOPORT_FIELD_NUMBER = 5; private int infoPort_ = 0; /** *
     * datanode http port
     * 
* * required uint32 infoPort = 5; * @return Whether the infoPort field is set. */ @java.lang.Override public boolean hasInfoPort() { return ((bitField0_ & 0x00000010) != 0); } /** *
     * datanode http port
     * 
* * required uint32 infoPort = 5; * @return The infoPort. */ @java.lang.Override public int getInfoPort() { return infoPort_; } public static final int IPCPORT_FIELD_NUMBER = 6; private int ipcPort_ = 0; /** *
     * ipc server port
     * 
* * required uint32 ipcPort = 6; * @return Whether the ipcPort field is set. */ @java.lang.Override public boolean hasIpcPort() { return ((bitField0_ & 0x00000020) != 0); } /** *
     * ipc server port
     * 
* * required uint32 ipcPort = 6; * @return The ipcPort. */ @java.lang.Override public int getIpcPort() { return ipcPort_; } public static final int INFOSECUREPORT_FIELD_NUMBER = 7; private int infoSecurePort_ = 0; /** *
     * datanode https port
     * 
* * optional uint32 infoSecurePort = 7 [default = 0]; * @return Whether the infoSecurePort field is set. */ @java.lang.Override public boolean hasInfoSecurePort() { return ((bitField0_ & 0x00000040) != 0); } /** *
     * datanode https port
     * 
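      * Editorial note (not part of the generated source): since this field is
      * optional with a default of 0, callers usually test hasInfoSecurePort()
      * before trusting the value, e.g. (id being an illustrative
      * DatanodeIDProto instance):
      *
      *   int httpsPort = id.hasInfoSecurePort() ? id.getInfoSecurePort() : -1;
      *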
* * optional uint32 infoSecurePort = 7 [default = 0]; * @return The infoSecurePort. */ @java.lang.Override public int getInfoSecurePort() { return infoSecurePort_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasIpAddr()) { memoizedIsInitialized = 0; return false; } if (!hasHostName()) { memoizedIsInitialized = 0; return false; } if (!hasDatanodeUuid()) { memoizedIsInitialized = 0; return false; } if (!hasXferPort()) { memoizedIsInitialized = 0; return false; } if (!hasInfoPort()) { memoizedIsInitialized = 0; return false; } if (!hasIpcPort()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, ipAddr_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, hostName_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, datanodeUuid_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt32(4, xferPort_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt32(5, infoPort_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt32(6, ipcPort_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeUInt32(7, infoSecurePort_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, ipAddr_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, hostName_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, datanodeUuid_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(4, xferPort_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(5, infoPort_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(6, ipcPort_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(7, infoSecurePort_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) obj; if (hasIpAddr() != other.hasIpAddr()) return false; if (hasIpAddr()) { if (!getIpAddr() .equals(other.getIpAddr())) return false; } if (hasHostName() != other.hasHostName()) return false; if (hasHostName()) { if (!getHostName() .equals(other.getHostName())) return false; } if (hasDatanodeUuid() 
!= other.hasDatanodeUuid()) return false; if (hasDatanodeUuid()) { if (!getDatanodeUuid() .equals(other.getDatanodeUuid())) return false; } if (hasXferPort() != other.hasXferPort()) return false; if (hasXferPort()) { if (getXferPort() != other.getXferPort()) return false; } if (hasInfoPort() != other.hasInfoPort()) return false; if (hasInfoPort()) { if (getInfoPort() != other.getInfoPort()) return false; } if (hasIpcPort() != other.hasIpcPort()) return false; if (hasIpcPort()) { if (getIpcPort() != other.getIpcPort()) return false; } if (hasInfoSecurePort() != other.hasInfoSecurePort()) return false; if (hasInfoSecurePort()) { if (getInfoSecurePort() != other.getInfoSecurePort()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasIpAddr()) { hash = (37 * hash) + IPADDR_FIELD_NUMBER; hash = (53 * hash) + getIpAddr().hashCode(); } if (hasHostName()) { hash = (37 * hash) + HOSTNAME_FIELD_NUMBER; hash = (53 * hash) + getHostName().hashCode(); } if (hasDatanodeUuid()) { hash = (37 * hash) + DATANODEUUID_FIELD_NUMBER; hash = (53 * hash) + getDatanodeUuid().hashCode(); } if (hasXferPort()) { hash = (37 * hash) + XFERPORT_FIELD_NUMBER; hash = (53 * hash) + getXferPort(); } if (hasInfoPort()) { hash = (37 * hash) + INFOPORT_FIELD_NUMBER; hash = (53 * hash) + getInfoPort(); } if (hasIpcPort()) { hash = (37 * hash) + IPCPORT_FIELD_NUMBER; hash = (53 * hash) + getIpcPort(); } if (hasInfoSecurePort()) { hash = (37 * hash) + INFOSECUREPORT_FIELD_NUMBER; hash = (53 * hash) + getInfoSecurePort(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Identifies a Datanode
     * 
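      * Editorial note (not part of the generated source): build() verifies the
      * required fields and throws if any is unset, whereas buildPartial()
      * skips that check; isInitialized() can be consulted first. A round-trip
      * sketch, assuming the standard toByteArray() inherited from the protobuf
      * runtime (not shown in this excerpt):
      *
      *   DatanodeIDProto.Builder b = DatanodeIDProto.newBuilder();
      *   // ... populate the required fields as in the class-level example ...
      *   if (b.isInitialized()) {
      *     byte[] wire = b.build().toByteArray();
      *     // parseFrom(byte[]) declares InvalidProtocolBufferException
      *     DatanodeIDProto copy = DatanodeIDProto.parseFrom(wire);
      *   }
      *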
* * Protobuf type {@code hadoop.hdfs.DatanodeIDProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeIDProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; ipAddr_ = ""; hostName_ = ""; datanodeUuid_ = ""; xferPort_ = 0; infoPort_ = 0; ipcPort_ = 0; infoSecurePort_ = 0; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.ipAddr_ = ipAddr_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.hostName_ = hostName_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.datanodeUuid_ = datanodeUuid_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.xferPort_ = xferPort_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.infoPort_ = infoPort_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.ipcPort_ = ipcPort_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { result.infoSecurePort_ = infoSecurePort_; to_bitField0_ |= 0x00000040; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder 
setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) return this; if (other.hasIpAddr()) { ipAddr_ = other.ipAddr_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasHostName()) { hostName_ = other.hostName_; bitField0_ |= 0x00000002; onChanged(); } if (other.hasDatanodeUuid()) { datanodeUuid_ = other.datanodeUuid_; bitField0_ |= 0x00000004; onChanged(); } if (other.hasXferPort()) { setXferPort(other.getXferPort()); } if (other.hasInfoPort()) { setInfoPort(other.getInfoPort()); } if (other.hasIpcPort()) { setIpcPort(other.getIpcPort()); } if (other.hasInfoSecurePort()) { setInfoSecurePort(other.getInfoSecurePort()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasIpAddr()) { return false; } if (!hasHostName()) { return false; } if (!hasDatanodeUuid()) { return false; } if (!hasXferPort()) { return false; } if (!hasInfoPort()) { return false; } if (!hasIpcPort()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { ipAddr_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 18: { hostName_ = input.readBytes(); bitField0_ |= 0x00000002; break; } // case 18 case 26: { datanodeUuid_ = input.readBytes(); bitField0_ |= 0x00000004; break; } // case 26 case 32: { xferPort_ = input.readUInt32(); bitField0_ |= 0x00000008; break; } // case 32 case 40: { infoPort_ = input.readUInt32(); bitField0_ |= 0x00000010; break; } // case 40 case 48: { ipcPort_ = input.readUInt32(); bitField0_ |= 0x00000020; break; } // case 48 case 56: { infoSecurePort_ = input.readUInt32(); bitField0_ |= 0x00000040; break; } // case 56 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } 
// while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object ipAddr_ = ""; /** *
       * IP address
       * 
* * required string ipAddr = 1; * @return Whether the ipAddr field is set. */ public boolean hasIpAddr() { return ((bitField0_ & 0x00000001) != 0); } /** *
       * IP address
       * 
* * required string ipAddr = 1; * @return The ipAddr. */ public java.lang.String getIpAddr() { java.lang.Object ref = ipAddr_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ipAddr_ = s; } return s; } else { return (java.lang.String) ref; } } /** *
       * IP address
       * 
* * required string ipAddr = 1; * @return The bytes for ipAddr. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getIpAddrBytes() { java.lang.Object ref = ipAddr_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ipAddr_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** *
       * IP address
       * 
* * required string ipAddr = 1; * @param value The ipAddr to set. * @return This builder for chaining. */ public Builder setIpAddr( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ipAddr_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** *
       * IP address
       * 
* * required string ipAddr = 1; * @return This builder for chaining. */ public Builder clearIpAddr() { ipAddr_ = getDefaultInstance().getIpAddr(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** *
       * IP address
       * 
* * required string ipAddr = 1; * @param value The bytes for ipAddr to set. * @return This builder for chaining. */ public Builder setIpAddrBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ipAddr_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private java.lang.Object hostName_ = ""; /** *
       * hostname
       * 
* * required string hostName = 2; * @return Whether the hostName field is set. */ public boolean hasHostName() { return ((bitField0_ & 0x00000002) != 0); } /** *
       * hostname
       * 
* * required string hostName = 2; * @return The hostName. */ public java.lang.String getHostName() { java.lang.Object ref = hostName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { hostName_ = s; } return s; } else { return (java.lang.String) ref; } } /** *
       * hostname
       * 
* * required string hostName = 2; * @return The bytes for hostName. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getHostNameBytes() { java.lang.Object ref = hostName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); hostName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** *
       * hostname
       * 
* * required string hostName = 2; * @param value The hostName to set. * @return This builder for chaining. */ public Builder setHostName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } hostName_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** *
       * hostname
       * 
* * required string hostName = 2; * @return This builder for chaining. */ public Builder clearHostName() { hostName_ = getDefaultInstance().getHostName(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } /** *
       * hostname
       * 
* * required string hostName = 2; * @param value The bytes for hostName to set. * @return This builder for chaining. */ public Builder setHostNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } hostName_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } private java.lang.Object datanodeUuid_ = ""; /** *
       * UUID assigned to the Datanode. For
       * 
* * required string datanodeUuid = 3; * @return Whether the datanodeUuid field is set. */ public boolean hasDatanodeUuid() { return ((bitField0_ & 0x00000004) != 0); } /** *
       * UUID assigned to the Datanode. For
       * 
* * required string datanodeUuid = 3; * @return The datanodeUuid. */ public java.lang.String getDatanodeUuid() { java.lang.Object ref = datanodeUuid_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { datanodeUuid_ = s; } return s; } else { return (java.lang.String) ref; } } /** *
       * UUID assigned to the Datanode. For
       * 
* * required string datanodeUuid = 3; * @return The bytes for datanodeUuid. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getDatanodeUuidBytes() { java.lang.Object ref = datanodeUuid_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); datanodeUuid_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** *
       * UUID assigned to the Datanode. For
       * 
* * required string datanodeUuid = 3; * @param value The datanodeUuid to set. * @return This builder for chaining. */ public Builder setDatanodeUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } datanodeUuid_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** *
       * UUID assigned to the Datanode. For
       * 
* * required string datanodeUuid = 3; * @return This builder for chaining. */ public Builder clearDatanodeUuid() { datanodeUuid_ = getDefaultInstance().getDatanodeUuid(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } /** *
       * UUID assigned to the Datanode. For
       * 
* * required string datanodeUuid = 3; * @param value The bytes for datanodeUuid to set. * @return This builder for chaining. */ public Builder setDatanodeUuidBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } datanodeUuid_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } private int xferPort_ ; /** *
       * upgraded clusters this is the same
       * as the original StorageID of the
       * Datanode.
       * 
* * required uint32 xferPort = 4; * @return Whether the xferPort field is set. */ @java.lang.Override public boolean hasXferPort() { return ((bitField0_ & 0x00000008) != 0); } /** *
       * upgraded clusters this is the same
       * as the original StorageID of the
       * Datanode.
       * 
* * required uint32 xferPort = 4; * @return The xferPort. */ @java.lang.Override public int getXferPort() { return xferPort_; } /** *
       * upgraded clusters this is the same
       * as the original StorageID of the
       * Datanode.
       * 
* * required uint32 xferPort = 4; * @param value The xferPort to set. * @return This builder for chaining. */ public Builder setXferPort(int value) { xferPort_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** *
       * upgraded clusters this is the same
       * as the original StorageID of the
       * Datanode.
       * 
* * required uint32 xferPort = 4; * @return This builder for chaining. */ public Builder clearXferPort() { bitField0_ = (bitField0_ & ~0x00000008); xferPort_ = 0; onChanged(); return this; } private int infoPort_ ; /** *
       * datanode http port
       * 
* * required uint32 infoPort = 5; * @return Whether the infoPort field is set. */ @java.lang.Override public boolean hasInfoPort() { return ((bitField0_ & 0x00000010) != 0); } /** *
       * datanode http port
       * 
* * required uint32 infoPort = 5; * @return The infoPort. */ @java.lang.Override public int getInfoPort() { return infoPort_; } /** *
       * datanode http port
       * 
* * required uint32 infoPort = 5; * @param value The infoPort to set. * @return This builder for chaining. */ public Builder setInfoPort(int value) { infoPort_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** *
       * datanode http port
       * 
* * required uint32 infoPort = 5; * @return This builder for chaining. */ public Builder clearInfoPort() { bitField0_ = (bitField0_ & ~0x00000010); infoPort_ = 0; onChanged(); return this; } private int ipcPort_ ; /** *
       * ipc server port
       * 
* * required uint32 ipcPort = 6; * @return Whether the ipcPort field is set. */ @java.lang.Override public boolean hasIpcPort() { return ((bitField0_ & 0x00000020) != 0); } /** *
       * ipc server port
       * 
* * required uint32 ipcPort = 6; * @return The ipcPort. */ @java.lang.Override public int getIpcPort() { return ipcPort_; } /** *
       * ipc server port
       * 
* * required uint32 ipcPort = 6; * @param value The ipcPort to set. * @return This builder for chaining. */ public Builder setIpcPort(int value) { ipcPort_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } /** *
       * ipc server port
       * 
* * required uint32 ipcPort = 6; * @return This builder for chaining. */ public Builder clearIpcPort() { bitField0_ = (bitField0_ & ~0x00000020); ipcPort_ = 0; onChanged(); return this; } private int infoSecurePort_ ; /** *
       * datanode https port
       * 
* * optional uint32 infoSecurePort = 7 [default = 0]; * @return Whether the infoSecurePort field is set. */ @java.lang.Override public boolean hasInfoSecurePort() { return ((bitField0_ & 0x00000040) != 0); } /** *
       * datanode https port
       * 
* * optional uint32 infoSecurePort = 7 [default = 0]; * @return The infoSecurePort. */ @java.lang.Override public int getInfoSecurePort() { return infoSecurePort_; } /** *
       * datanode https port
       * 
* * optional uint32 infoSecurePort = 7 [default = 0]; * @param value The infoSecurePort to set. * @return This builder for chaining. */ public Builder setInfoSecurePort(int value) { infoSecurePort_ = value; bitField0_ |= 0x00000040; onChanged(); return this; } /** *
       * datanode https port
       * 
* * optional uint32 infoSecurePort = 7 [default = 0]; * @return This builder for chaining. */ public Builder clearInfoSecurePort() { bitField0_ = (bitField0_ & ~0x00000040); infoSecurePort_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeIDProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeIDProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DatanodeIDProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DatanodeLocalInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeLocalInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string softwareVersion = 1; * @return Whether the softwareVersion field is set. */ boolean hasSoftwareVersion(); /** * required string softwareVersion = 1; * @return The softwareVersion. */ java.lang.String getSoftwareVersion(); /** * required string softwareVersion = 1; * @return The bytes for softwareVersion. */ org.apache.hadoop.thirdparty.protobuf.ByteString getSoftwareVersionBytes(); /** * required string configVersion = 2; * @return Whether the configVersion field is set. */ boolean hasConfigVersion(); /** * required string configVersion = 2; * @return The configVersion. */ java.lang.String getConfigVersion(); /** * required string configVersion = 2; * @return The bytes for configVersion. */ org.apache.hadoop.thirdparty.protobuf.ByteString getConfigVersionBytes(); /** * required uint64 uptime = 3; * @return Whether the uptime field is set. 
*/ boolean hasUptime(); /** * required uint64 uptime = 3; * @return The uptime. */ long getUptime(); } /** *
   **
   * Datanode local information
   * 
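   *
   * A minimal construction sketch, assuming the standard generated builder API shown
   * below; the version strings and uptime value are hypothetical:
   * <pre>{@code
   * DatanodeLocalInfoProto info = DatanodeLocalInfoProto.newBuilder()
   *     .setSoftwareVersion("3.4.0")
   *     .setConfigVersion("example-config")
   *     .setUptime(3600L)
   *     .build();                      // all three fields are required, so build() succeeds
   * byte[] bytes = info.toByteArray(); // serialize to the wire format
   * }</pre>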
* * Protobuf type {@code hadoop.hdfs.DatanodeLocalInfoProto} */ public static final class DatanodeLocalInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeLocalInfoProto) DatanodeLocalInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DatanodeLocalInfoProto.newBuilder() to construct. private DatanodeLocalInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DatanodeLocalInfoProto() { softwareVersion_ = ""; configVersion_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new DatanodeLocalInfoProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder.class); } private int bitField0_; public static final int SOFTWAREVERSION_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object softwareVersion_ = ""; /** * required string softwareVersion = 1; * @return Whether the softwareVersion field is set. */ @java.lang.Override public boolean hasSoftwareVersion() { return ((bitField0_ & 0x00000001) != 0); } /** * required string softwareVersion = 1; * @return The softwareVersion. */ @java.lang.Override public java.lang.String getSoftwareVersion() { java.lang.Object ref = softwareVersion_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { softwareVersion_ = s; } return s; } } /** * required string softwareVersion = 1; * @return The bytes for softwareVersion. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getSoftwareVersionBytes() { java.lang.Object ref = softwareVersion_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); softwareVersion_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int CONFIGVERSION_FIELD_NUMBER = 2; @SuppressWarnings("serial") private volatile java.lang.Object configVersion_ = ""; /** * required string configVersion = 2; * @return Whether the configVersion field is set. */ @java.lang.Override public boolean hasConfigVersion() { return ((bitField0_ & 0x00000002) != 0); } /** * required string configVersion = 2; * @return The configVersion. 
*/ @java.lang.Override public java.lang.String getConfigVersion() { java.lang.Object ref = configVersion_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { configVersion_ = s; } return s; } } /** * required string configVersion = 2; * @return The bytes for configVersion. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getConfigVersionBytes() { java.lang.Object ref = configVersion_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); configVersion_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int UPTIME_FIELD_NUMBER = 3; private long uptime_ = 0L; /** * required uint64 uptime = 3; * @return Whether the uptime field is set. */ @java.lang.Override public boolean hasUptime() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 uptime = 3; * @return The uptime. */ @java.lang.Override public long getUptime() { return uptime_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSoftwareVersion()) { memoizedIsInitialized = 0; return false; } if (!hasConfigVersion()) { memoizedIsInitialized = 0; return false; } if (!hasUptime()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, softwareVersion_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, configVersion_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, uptime_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, softwareVersion_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, configVersion_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, uptime_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) obj; if (hasSoftwareVersion() != other.hasSoftwareVersion()) return false; if (hasSoftwareVersion()) { if (!getSoftwareVersion() .equals(other.getSoftwareVersion())) return false; } if (hasConfigVersion() != other.hasConfigVersion()) return false; if 
(hasConfigVersion()) { if (!getConfigVersion() .equals(other.getConfigVersion())) return false; } if (hasUptime() != other.hasUptime()) return false; if (hasUptime()) { if (getUptime() != other.getUptime()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSoftwareVersion()) { hash = (37 * hash) + SOFTWAREVERSION_FIELD_NUMBER; hash = (53 * hash) + getSoftwareVersion().hashCode(); } if (hasConfigVersion()) { hash = (37 * hash) + CONFIGVERSION_FIELD_NUMBER; hash = (53 * hash) + getConfigVersion().hashCode(); } if (hasUptime()) { hash = (37 * hash) + UPTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getUptime()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
.parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Datanode local information
     * 
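     *
     * Sketch of the required-field semantics enforced by this builder (illustrative only;
     * the software version value is hypothetical):
     * <pre>{@code
     * DatanodeLocalInfoProto.Builder b = DatanodeLocalInfoProto.newBuilder()
     *     .setSoftwareVersion("3.4.0");
     * boolean ok = b.isInitialized();                     // false: configVersion and uptime are unset
     * DatanodeLocalInfoProto partial = b.buildPartial();  // succeeds even when not initialized
     * }</pre>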
* * Protobuf type {@code hadoop.hdfs.DatanodeLocalInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeLocalInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; softwareVersion_ = ""; configVersion_ = ""; uptime_ = 0L; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.softwareVersion_ = softwareVersion_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.configVersion_ = configVersion_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.uptime_ = uptime_; to_bitField0_ |= 0x00000004; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance()) return this; if (other.hasSoftwareVersion()) { softwareVersion_ = other.softwareVersion_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasConfigVersion()) { configVersion_ = other.configVersion_; bitField0_ |= 0x00000002; onChanged(); } if (other.hasUptime()) { setUptime(other.getUptime()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasSoftwareVersion()) { return false; } if (!hasConfigVersion()) { return false; } if (!hasUptime()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { softwareVersion_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 18: { configVersion_ = input.readBytes(); bitField0_ |= 0x00000002; break; } // case 18 case 24: { uptime_ = input.readUInt64(); bitField0_ |= 0x00000004; break; } // case 24 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object softwareVersion_ = ""; /** * required string softwareVersion = 1; * @return Whether the softwareVersion field is set. */ public boolean hasSoftwareVersion() { return ((bitField0_ & 0x00000001) != 0); } /** * required string softwareVersion = 1; * @return The softwareVersion. */ public java.lang.String getSoftwareVersion() { java.lang.Object ref = softwareVersion_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { softwareVersion_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string softwareVersion = 1; * @return The bytes for softwareVersion. 
*/ public org.apache.hadoop.thirdparty.protobuf.ByteString getSoftwareVersionBytes() { java.lang.Object ref = softwareVersion_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); softwareVersion_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string softwareVersion = 1; * @param value The softwareVersion to set. * @return This builder for chaining. */ public Builder setSoftwareVersion( java.lang.String value) { if (value == null) { throw new NullPointerException(); } softwareVersion_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required string softwareVersion = 1; * @return This builder for chaining. */ public Builder clearSoftwareVersion() { softwareVersion_ = getDefaultInstance().getSoftwareVersion(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * required string softwareVersion = 1; * @param value The bytes for softwareVersion to set. * @return This builder for chaining. */ public Builder setSoftwareVersionBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } softwareVersion_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private java.lang.Object configVersion_ = ""; /** * required string configVersion = 2; * @return Whether the configVersion field is set. */ public boolean hasConfigVersion() { return ((bitField0_ & 0x00000002) != 0); } /** * required string configVersion = 2; * @return The configVersion. */ public java.lang.String getConfigVersion() { java.lang.Object ref = configVersion_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { configVersion_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string configVersion = 2; * @return The bytes for configVersion. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getConfigVersionBytes() { java.lang.Object ref = configVersion_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); configVersion_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string configVersion = 2; * @param value The configVersion to set. * @return This builder for chaining. */ public Builder setConfigVersion( java.lang.String value) { if (value == null) { throw new NullPointerException(); } configVersion_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required string configVersion = 2; * @return This builder for chaining. */ public Builder clearConfigVersion() { configVersion_ = getDefaultInstance().getConfigVersion(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } /** * required string configVersion = 2; * @param value The bytes for configVersion to set. * @return This builder for chaining. */ public Builder setConfigVersionBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } configVersion_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } private long uptime_ ; /** * required uint64 uptime = 3; * @return Whether the uptime field is set. 
*/ @java.lang.Override public boolean hasUptime() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 uptime = 3; * @return The uptime. */ @java.lang.Override public long getUptime() { return uptime_; } /** * required uint64 uptime = 3; * @param value The uptime to set. * @return This builder for chaining. */ public Builder setUptime(long value) { uptime_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required uint64 uptime = 3; * @return This builder for chaining. */ public Builder clearUptime() { bitField0_ = (bitField0_ & ~0x00000004); uptime_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeLocalInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeLocalInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DatanodeLocalInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DatanodeVolumeInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeVolumeInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string path = 1; * @return Whether the path field is set. */ boolean hasPath(); /** * required string path = 1; * @return The path. */ java.lang.String getPath(); /** * required string path = 1; * @return The bytes for path. */ org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes(); /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; * @return Whether the storageType field is set. 
*/ boolean hasStorageType(); /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; * @return The storageType. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType(); /** * required uint64 usedSpace = 3; * @return Whether the usedSpace field is set. */ boolean hasUsedSpace(); /** * required uint64 usedSpace = 3; * @return The usedSpace. */ long getUsedSpace(); /** * required uint64 freeSpace = 4; * @return Whether the freeSpace field is set. */ boolean hasFreeSpace(); /** * required uint64 freeSpace = 4; * @return The freeSpace. */ long getFreeSpace(); /** * required uint64 reservedSpace = 5; * @return Whether the reservedSpace field is set. */ boolean hasReservedSpace(); /** * required uint64 reservedSpace = 5; * @return The reservedSpace. */ long getReservedSpace(); /** * required uint64 reservedSpaceForReplicas = 6; * @return Whether the reservedSpaceForReplicas field is set. */ boolean hasReservedSpaceForReplicas(); /** * required uint64 reservedSpaceForReplicas = 6; * @return The reservedSpaceForReplicas. */ long getReservedSpaceForReplicas(); /** * required uint64 numBlocks = 7; * @return Whether the numBlocks field is set. */ boolean hasNumBlocks(); /** * required uint64 numBlocks = 7; * @return The numBlocks. */ long getNumBlocks(); } /** *
   **
   * Datanode volume information
   * 
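   *
   * A construction sketch assuming the generated builder API; all seven fields are
   * required, and the path and byte counts below are hypothetical:
   * <pre>{@code
   * DatanodeVolumeInfoProto vol = DatanodeVolumeInfoProto.newBuilder()
   *     .setPath("/data/dn1")
   *     .setStorageType(StorageTypeProto.DISK)
   *     .setUsedSpace(1024L * 1024L)
   *     .setFreeSpace(8L * 1024L * 1024L)
   *     .setReservedSpace(0L)
   *     .setReservedSpaceForReplicas(0L)
   *     .setNumBlocks(42L)
   *     .build();
   * }</pre>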
* * Protobuf type {@code hadoop.hdfs.DatanodeVolumeInfoProto} */ public static final class DatanodeVolumeInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeVolumeInfoProto) DatanodeVolumeInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DatanodeVolumeInfoProto.newBuilder() to construct. private DatanodeVolumeInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DatanodeVolumeInfoProto() { path_ = ""; storageType_ = 1; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new DatanodeVolumeInfoProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder.class); } private int bitField0_; public static final int PATH_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object path_ = ""; /** * required string path = 1; * @return Whether the path field is set. */ @java.lang.Override public boolean hasPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required string path = 1; * @return The path. */ @java.lang.Override public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * required string path = 1; * @return The bytes for path. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int STORAGETYPE_FIELD_NUMBER = 2; private int storageType_ = 1; /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; * @return Whether the storageType field is set. */ @java.lang.Override public boolean hasStorageType() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; * @return The storageType. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } public static final int USEDSPACE_FIELD_NUMBER = 3; private long usedSpace_ = 0L; /** * required uint64 usedSpace = 3; * @return Whether the usedSpace field is set. */ @java.lang.Override public boolean hasUsedSpace() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 usedSpace = 3; * @return The usedSpace. */ @java.lang.Override public long getUsedSpace() { return usedSpace_; } public static final int FREESPACE_FIELD_NUMBER = 4; private long freeSpace_ = 0L; /** * required uint64 freeSpace = 4; * @return Whether the freeSpace field is set. */ @java.lang.Override public boolean hasFreeSpace() { return ((bitField0_ & 0x00000008) != 0); } /** * required uint64 freeSpace = 4; * @return The freeSpace. */ @java.lang.Override public long getFreeSpace() { return freeSpace_; } public static final int RESERVEDSPACE_FIELD_NUMBER = 5; private long reservedSpace_ = 0L; /** * required uint64 reservedSpace = 5; * @return Whether the reservedSpace field is set. */ @java.lang.Override public boolean hasReservedSpace() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint64 reservedSpace = 5; * @return The reservedSpace. */ @java.lang.Override public long getReservedSpace() { return reservedSpace_; } public static final int RESERVEDSPACEFORREPLICAS_FIELD_NUMBER = 6; private long reservedSpaceForReplicas_ = 0L; /** * required uint64 reservedSpaceForReplicas = 6; * @return Whether the reservedSpaceForReplicas field is set. */ @java.lang.Override public boolean hasReservedSpaceForReplicas() { return ((bitField0_ & 0x00000020) != 0); } /** * required uint64 reservedSpaceForReplicas = 6; * @return The reservedSpaceForReplicas. */ @java.lang.Override public long getReservedSpaceForReplicas() { return reservedSpaceForReplicas_; } public static final int NUMBLOCKS_FIELD_NUMBER = 7; private long numBlocks_ = 0L; /** * required uint64 numBlocks = 7; * @return Whether the numBlocks field is set. */ @java.lang.Override public boolean hasNumBlocks() { return ((bitField0_ & 0x00000040) != 0); } /** * required uint64 numBlocks = 7; * @return The numBlocks. 
*/ @java.lang.Override public long getNumBlocks() { return numBlocks_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasPath()) { memoizedIsInitialized = 0; return false; } if (!hasStorageType()) { memoizedIsInitialized = 0; return false; } if (!hasUsedSpace()) { memoizedIsInitialized = 0; return false; } if (!hasFreeSpace()) { memoizedIsInitialized = 0; return false; } if (!hasReservedSpace()) { memoizedIsInitialized = 0; return false; } if (!hasReservedSpaceForReplicas()) { memoizedIsInitialized = 0; return false; } if (!hasNumBlocks()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, path_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeEnum(2, storageType_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, usedSpace_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, freeSpace_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, reservedSpace_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt64(6, reservedSpaceForReplicas_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeUInt64(7, numBlocks_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, path_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(2, storageType_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, usedSpace_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, freeSpace_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, reservedSpace_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(6, reservedSpaceForReplicas_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(7, numBlocks_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto) obj; if (hasPath() != other.hasPath()) return false; if (hasPath()) { if (!getPath() .equals(other.getPath())) return false; } if (hasStorageType() != other.hasStorageType()) return false; if (hasStorageType()) { if (storageType_ != other.storageType_) return false; } if (hasUsedSpace() != other.hasUsedSpace()) return false; if (hasUsedSpace()) { if (getUsedSpace() != 
other.getUsedSpace()) return false; } if (hasFreeSpace() != other.hasFreeSpace()) return false; if (hasFreeSpace()) { if (getFreeSpace() != other.getFreeSpace()) return false; } if (hasReservedSpace() != other.hasReservedSpace()) return false; if (hasReservedSpace()) { if (getReservedSpace() != other.getReservedSpace()) return false; } if (hasReservedSpaceForReplicas() != other.hasReservedSpaceForReplicas()) return false; if (hasReservedSpaceForReplicas()) { if (getReservedSpaceForReplicas() != other.getReservedSpaceForReplicas()) return false; } if (hasNumBlocks() != other.hasNumBlocks()) return false; if (hasNumBlocks()) { if (getNumBlocks() != other.getNumBlocks()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } if (hasStorageType()) { hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER; hash = (53 * hash) + storageType_; } if (hasUsedSpace()) { hash = (37 * hash) + USEDSPACE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getUsedSpace()); } if (hasFreeSpace()) { hash = (37 * hash) + FREESPACE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFreeSpace()); } if (hasReservedSpace()) { hash = (37 * hash) + RESERVEDSPACE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getReservedSpace()); } if (hasReservedSpaceForReplicas()) { hash = (37 * hash) + RESERVEDSPACEFORREPLICAS_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getReservedSpaceForReplicas()); } if (hasNumBlocks()) { hash = (37 * hash) + NUMBLOCKS_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNumBlocks()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Datanode volume information
     * 
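     *
     * Round-trip sketch using the generated delimited parse helpers; "vol" is an
     * already-built DatanodeVolumeInfoProto, and "out" / "in" are hypothetical streams:
     * <pre>{@code
     * vol.writeDelimitedTo(out);                     // java.io.OutputStream out
     * DatanodeVolumeInfoProto copy =
     *     DatanodeVolumeInfoProto.parseDelimitedFrom(in);  // java.io.InputStream in
     * }</pre>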
* * Protobuf type {@code hadoop.hdfs.DatanodeVolumeInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeVolumeInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; path_ = ""; storageType_ = 1; usedSpace_ = 0L; freeSpace_ = 0L; reservedSpace_ = 0L; reservedSpaceForReplicas_ = 0L; numBlocks_ = 0L; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.path_ = path_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.storageType_ = storageType_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.usedSpace_ = usedSpace_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.freeSpace_ = freeSpace_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.reservedSpace_ = reservedSpace_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.reservedSpaceForReplicas_ = reservedSpaceForReplicas_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { result.numBlocks_ 
= numBlocks_; to_bitField0_ |= 0x00000040; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.getDefaultInstance()) return this; if (other.hasPath()) { path_ = other.path_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasStorageType()) { setStorageType(other.getStorageType()); } if (other.hasUsedSpace()) { setUsedSpace(other.getUsedSpace()); } if (other.hasFreeSpace()) { setFreeSpace(other.getFreeSpace()); } if (other.hasReservedSpace()) { setReservedSpace(other.getReservedSpace()); } if (other.hasReservedSpaceForReplicas()) { setReservedSpaceForReplicas(other.getReservedSpaceForReplicas()); } if (other.hasNumBlocks()) { setNumBlocks(other.getNumBlocks()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasPath()) { return false; } if (!hasStorageType()) { return false; } if (!hasUsedSpace()) { return false; } if (!hasFreeSpace()) { return false; } if (!hasReservedSpace()) { return false; } if (!hasReservedSpaceForReplicas()) { return false; } if (!hasNumBlocks()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { path_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 16: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(2, tmpRaw); } else { storageType_ = tmpRaw; bitField0_ |= 0x00000002; } break; } // case 16 case 24: { usedSpace_ = input.readUInt64(); bitField0_ |= 0x00000004; break; } // case 24 case 
32: { freeSpace_ = input.readUInt64(); bitField0_ |= 0x00000008; break; } // case 32 case 40: { reservedSpace_ = input.readUInt64(); bitField0_ |= 0x00000010; break; } // case 40 case 48: { reservedSpaceForReplicas_ = input.readUInt64(); bitField0_ |= 0x00000020; break; } // case 48 case 56: { numBlocks_ = input.readUInt64(); bitField0_ |= 0x00000040; break; } // case 56 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object path_ = ""; /** * required string path = 1; * @return Whether the path field is set. */ public boolean hasPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required string path = 1; * @return The path. */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string path = 1; * @return The bytes for path. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string path = 1; * @param value The path to set. * @return This builder for chaining. */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } path_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required string path = 1; * @return This builder for chaining. */ public Builder clearPath() { path_ = getDefaultInstance().getPath(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * required string path = 1; * @param value The bytes for path to set. * @return This builder for chaining. */ public Builder setPathBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } path_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private int storageType_ = 1; /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; * @return Whether the storageType field is set. */ @java.lang.Override public boolean hasStorageType() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; * @return The storageType. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; * @param value The storageType to set. * @return This builder for chaining. 
*/ public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; storageType_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; * @return This builder for chaining. */ public Builder clearStorageType() { bitField0_ = (bitField0_ & ~0x00000002); storageType_ = 1; onChanged(); return this; } private long usedSpace_ ; /** * required uint64 usedSpace = 3; * @return Whether the usedSpace field is set. */ @java.lang.Override public boolean hasUsedSpace() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 usedSpace = 3; * @return The usedSpace. */ @java.lang.Override public long getUsedSpace() { return usedSpace_; } /** * required uint64 usedSpace = 3; * @param value The usedSpace to set. * @return This builder for chaining. */ public Builder setUsedSpace(long value) { usedSpace_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required uint64 usedSpace = 3; * @return This builder for chaining. */ public Builder clearUsedSpace() { bitField0_ = (bitField0_ & ~0x00000004); usedSpace_ = 0L; onChanged(); return this; } private long freeSpace_ ; /** * required uint64 freeSpace = 4; * @return Whether the freeSpace field is set. */ @java.lang.Override public boolean hasFreeSpace() { return ((bitField0_ & 0x00000008) != 0); } /** * required uint64 freeSpace = 4; * @return The freeSpace. */ @java.lang.Override public long getFreeSpace() { return freeSpace_; } /** * required uint64 freeSpace = 4; * @param value The freeSpace to set. * @return This builder for chaining. */ public Builder setFreeSpace(long value) { freeSpace_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * required uint64 freeSpace = 4; * @return This builder for chaining. */ public Builder clearFreeSpace() { bitField0_ = (bitField0_ & ~0x00000008); freeSpace_ = 0L; onChanged(); return this; } private long reservedSpace_ ; /** * required uint64 reservedSpace = 5; * @return Whether the reservedSpace field is set. */ @java.lang.Override public boolean hasReservedSpace() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint64 reservedSpace = 5; * @return The reservedSpace. */ @java.lang.Override public long getReservedSpace() { return reservedSpace_; } /** * required uint64 reservedSpace = 5; * @param value The reservedSpace to set. * @return This builder for chaining. */ public Builder setReservedSpace(long value) { reservedSpace_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * required uint64 reservedSpace = 5; * @return This builder for chaining. */ public Builder clearReservedSpace() { bitField0_ = (bitField0_ & ~0x00000010); reservedSpace_ = 0L; onChanged(); return this; } private long reservedSpaceForReplicas_ ; /** * required uint64 reservedSpaceForReplicas = 6; * @return Whether the reservedSpaceForReplicas field is set. */ @java.lang.Override public boolean hasReservedSpaceForReplicas() { return ((bitField0_ & 0x00000020) != 0); } /** * required uint64 reservedSpaceForReplicas = 6; * @return The reservedSpaceForReplicas. */ @java.lang.Override public long getReservedSpaceForReplicas() { return reservedSpaceForReplicas_; } /** * required uint64 reservedSpaceForReplicas = 6; * @param value The reservedSpaceForReplicas to set. * @return This builder for chaining. 
*/ public Builder setReservedSpaceForReplicas(long value) { reservedSpaceForReplicas_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } /** * required uint64 reservedSpaceForReplicas = 6; * @return This builder for chaining. */ public Builder clearReservedSpaceForReplicas() { bitField0_ = (bitField0_ & ~0x00000020); reservedSpaceForReplicas_ = 0L; onChanged(); return this; } private long numBlocks_ ; /** * required uint64 numBlocks = 7; * @return Whether the numBlocks field is set. */ @java.lang.Override public boolean hasNumBlocks() { return ((bitField0_ & 0x00000040) != 0); } /** * required uint64 numBlocks = 7; * @return The numBlocks. */ @java.lang.Override public long getNumBlocks() { return numBlocks_; } /** * required uint64 numBlocks = 7; * @param value The numBlocks to set. * @return This builder for chaining. */ public Builder setNumBlocks(long value) { numBlocks_ = value; bitField0_ |= 0x00000040; onChanged(); return this; } /** * required uint64 numBlocks = 7; * @return This builder for chaining. */ public Builder clearNumBlocks() { bitField0_ = (bitField0_ & ~0x00000040); numBlocks_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeVolumeInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeVolumeInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DatanodeVolumeInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DatanodeInfosProtoOrBuilder extends // 
@@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeInfosProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ java.util.List getDatanodesList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index); /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ int getDatanodesCount(); /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ java.util.List getDatanodesOrBuilderList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder( int index); } /** *
   * <pre>
   **
   * DatanodeInfo array
   * </pre>
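   *
   * <p>Usage sketch (illustrative, not part of the generated file): wrapping
   * existing {@code DatanodeInfoProto} instances and round-tripping the message.
   * {@code dn} is an assumed, already-built {@code DatanodeInfoProto}.
   * <pre>{@code
   * HdfsProtos.DatanodeInfosProto infos =
   *     HdfsProtos.DatanodeInfosProto.newBuilder()
   *         .addDatanodes(dn)          // repeated field, may be added many times
   *         .build();
   *
   * byte[] bytes = infos.toByteArray();
   * HdfsProtos.DatanodeInfosProto parsed =
   *     HdfsProtos.DatanodeInfosProto.parseFrom(bytes);
   * int count = parsed.getDatanodesCount();
   * }</pre>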
* * Protobuf type {@code hadoop.hdfs.DatanodeInfosProto} */ public static final class DatanodeInfosProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeInfosProto) DatanodeInfosProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DatanodeInfosProto.newBuilder() to construct. private DatanodeInfosProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DatanodeInfosProto() { datanodes_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new DatanodeInfosProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder.class); } public static final int DATANODES_FIELD_NUMBER = 1; @SuppressWarnings("serial") private java.util.List datanodes_; /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ @java.lang.Override public java.util.List getDatanodesList() { return datanodes_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ @java.lang.Override public java.util.List getDatanodesOrBuilderList() { return datanodes_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ @java.lang.Override public int getDatanodesCount() { return datanodes_.size(); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) { return datanodes_.get(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder( int index) { return datanodes_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; for (int i = 0; i < getDatanodesCount(); i++) { if (!getDatanodes(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < datanodes_.size(); i++) { output.writeMessage(1, datanodes_.get(i)); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < datanodes_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, datanodes_.get(i)); } size += getUnknownFields().getSerializedSize(); 
memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) obj; if (!getDatanodesList() .equals(other.getDatanodesList())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getDatanodesCount() > 0) { hash = (37 * hash) + DATANODES_FIELD_NUMBER; hash = (53 * hash) + getDatanodesList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     * <pre>
     **
     * DatanodeInfo array
     * </pre>
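     *
     * <p>Usage sketch (illustrative, not part of the generated file): the
     * repeated-field helpers on this builder. {@code existing} and
     * {@code moreDatanodes} are assumed values.
     * <pre>{@code
     * HdfsProtos.DatanodeInfosProto.Builder b =
     *     HdfsProtos.DatanodeInfosProto.newBuilder(existing);  // copy-from builder
     * b.addAllDatanodes(moreDatanodes);                        // Iterable of DatanodeInfoProto
     * if (b.getDatanodesCount() > 0) {
     *   b.removeDatanodes(0);                                  // drop the first entry
     * }
     * b.clearDatanodes();                                      // or reset the whole list
     * HdfsProtos.DatanodeInfosProto rebuilt = b.build();
     * }</pre>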
* * Protobuf type {@code hadoop.hdfs.DatanodeInfosProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeInfosProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; if (datanodesBuilder_ == null) { datanodes_ = java.util.Collections.emptyList(); } else { datanodes_ = null; datanodesBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result) { if (datanodesBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { datanodes_ = java.util.Collections.unmodifiableList(datanodes_); bitField0_ = (bitField0_ & ~0x00000001); } result.datanodes_ = datanodes_; } else { result.datanodes_ = datanodesBuilder_.build(); } } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result) { int from_bitField0_ = bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { 
return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance()) return this; if (datanodesBuilder_ == null) { if (!other.datanodes_.isEmpty()) { if (datanodes_.isEmpty()) { datanodes_ = other.datanodes_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureDatanodesIsMutable(); datanodes_.addAll(other.datanodes_); } onChanged(); } } else { if (!other.datanodes_.isEmpty()) { if (datanodesBuilder_.isEmpty()) { datanodesBuilder_.dispose(); datanodesBuilder_ = null; datanodes_ = other.datanodes_; bitField0_ = (bitField0_ & ~0x00000001); datanodesBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getDatanodesFieldBuilder() : null; } else { datanodesBuilder_.addAllMessages(other.datanodes_); } } } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { for (int i = 0; i < getDatanodesCount(); i++) { if (!getDatanodes(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry); if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); datanodes_.add(m); } else { datanodesBuilder_.addMessage(m); } break; } // case 10 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.util.List datanodes_ = java.util.Collections.emptyList(); private void ensureDatanodesIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { datanodes_ = new java.util.ArrayList(datanodes_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> datanodesBuilder_; /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public java.util.List getDatanodesList() { if (datanodesBuilder_ == null) { return java.util.Collections.unmodifiableList(datanodes_); } else { return datanodesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public int getDatanodesCount() { if (datanodesBuilder_ == null) { return datanodes_.size(); } else { return datanodesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) { if (datanodesBuilder_ == null) { return datanodes_.get(index); } else { return datanodesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder setDatanodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (datanodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDatanodesIsMutable(); datanodes_.set(index, value); onChanged(); } else { datanodesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder setDatanodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); datanodes_.set(index, builderForValue.build()); onChanged(); } else { datanodesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (datanodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDatanodesIsMutable(); datanodes_.add(value); onChanged(); } else { datanodesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addDatanodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (datanodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDatanodesIsMutable(); datanodes_.add(index, value); onChanged(); } else { datanodesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addDatanodes( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); datanodes_.add(builderForValue.build()); onChanged(); } else { datanodesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addDatanodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); datanodes_.add(index, builderForValue.build()); onChanged(); } else { datanodesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addAllDatanodes( java.lang.Iterable values) { if (datanodesBuilder_ == null) { 
ensureDatanodesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, datanodes_); onChanged(); } else { datanodesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder clearDatanodes() { if (datanodesBuilder_ == null) { datanodes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { datanodesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder removeDatanodes(int index) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); datanodes_.remove(index); onChanged(); } else { datanodesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getDatanodesBuilder( int index) { return getDatanodesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder( int index) { if (datanodesBuilder_ == null) { return datanodes_.get(index); } else { return datanodesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public java.util.List getDatanodesOrBuilderList() { if (datanodesBuilder_ != null) { return datanodesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(datanodes_); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder() { return getDatanodesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder( int index) { return getDatanodesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public java.util.List getDatanodesBuilderList() { return getDatanodesFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getDatanodesFieldBuilder() { if (datanodesBuilder_ == null) { datanodesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( datanodes_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); datanodes_ = null; } return datanodesBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // 
@@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeInfosProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeInfosProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DatanodeInfosProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DatanodeInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.DatanodeIDProto id = 1; * @return Whether the id field is set. */ boolean hasId(); /** * required .hadoop.hdfs.DatanodeIDProto id = 1; * @return The id. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId(); /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder(); /** * optional uint64 capacity = 2 [default = 0]; * @return Whether the capacity field is set. */ boolean hasCapacity(); /** * optional uint64 capacity = 2 [default = 0]; * @return The capacity. */ long getCapacity(); /** * optional uint64 dfsUsed = 3 [default = 0]; * @return Whether the dfsUsed field is set. */ boolean hasDfsUsed(); /** * optional uint64 dfsUsed = 3 [default = 0]; * @return The dfsUsed. */ long getDfsUsed(); /** * optional uint64 remaining = 4 [default = 0]; * @return Whether the remaining field is set. */ boolean hasRemaining(); /** * optional uint64 remaining = 4 [default = 0]; * @return The remaining. */ long getRemaining(); /** * optional uint64 blockPoolUsed = 5 [default = 0]; * @return Whether the blockPoolUsed field is set. */ boolean hasBlockPoolUsed(); /** * optional uint64 blockPoolUsed = 5 [default = 0]; * @return The blockPoolUsed. */ long getBlockPoolUsed(); /** * optional uint64 lastUpdate = 6 [default = 0]; * @return Whether the lastUpdate field is set. 
*/ boolean hasLastUpdate(); /** * optional uint64 lastUpdate = 6 [default = 0]; * @return The lastUpdate. */ long getLastUpdate(); /** * optional uint32 xceiverCount = 7 [default = 0]; * @return Whether the xceiverCount field is set. */ boolean hasXceiverCount(); /** * optional uint32 xceiverCount = 7 [default = 0]; * @return The xceiverCount. */ int getXceiverCount(); /** * optional string location = 8; * @return Whether the location field is set. */ boolean hasLocation(); /** * optional string location = 8; * @return The location. */ java.lang.String getLocation(); /** * optional string location = 8; * @return The bytes for location. */ org.apache.hadoop.thirdparty.protobuf.ByteString getLocationBytes(); /** * optional uint64 nonDfsUsed = 9; * @return Whether the nonDfsUsed field is set. */ boolean hasNonDfsUsed(); /** * optional uint64 nonDfsUsed = 9; * @return The nonDfsUsed. */ long getNonDfsUsed(); /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; * @return Whether the adminState field is set. */ boolean hasAdminState(); /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; * @return The adminState. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState(); /** * optional uint64 cacheCapacity = 11 [default = 0]; * @return Whether the cacheCapacity field is set. */ boolean hasCacheCapacity(); /** * optional uint64 cacheCapacity = 11 [default = 0]; * @return The cacheCapacity. */ long getCacheCapacity(); /** * optional uint64 cacheUsed = 12 [default = 0]; * @return Whether the cacheUsed field is set. */ boolean hasCacheUsed(); /** * optional uint64 cacheUsed = 12 [default = 0]; * @return The cacheUsed. */ long getCacheUsed(); /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; * @return Whether the lastUpdateMonotonic field is set. */ boolean hasLastUpdateMonotonic(); /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; * @return The lastUpdateMonotonic. */ long getLastUpdateMonotonic(); /** * optional string upgradeDomain = 14; * @return Whether the upgradeDomain field is set. */ boolean hasUpgradeDomain(); /** * optional string upgradeDomain = 14; * @return The upgradeDomain. */ java.lang.String getUpgradeDomain(); /** * optional string upgradeDomain = 14; * @return The bytes for upgradeDomain. */ org.apache.hadoop.thirdparty.protobuf.ByteString getUpgradeDomainBytes(); /** * optional uint64 lastBlockReportTime = 15 [default = 0]; * @return Whether the lastBlockReportTime field is set. */ boolean hasLastBlockReportTime(); /** * optional uint64 lastBlockReportTime = 15 [default = 0]; * @return The lastBlockReportTime. */ long getLastBlockReportTime(); /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; * @return Whether the lastBlockReportMonotonic field is set. */ boolean hasLastBlockReportMonotonic(); /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; * @return The lastBlockReportMonotonic. */ long getLastBlockReportMonotonic(); /** * optional uint32 numBlocks = 17 [default = 0]; * @return Whether the numBlocks field is set. */ boolean hasNumBlocks(); /** * optional uint32 numBlocks = 17 [default = 0]; * @return The numBlocks. */ int getNumBlocks(); } /** *
   * <pre>
   **
   * The status of a Datanode
   * </pre>
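   *
   * <p>Usage sketch (illustrative, not part of the generated file): reading an
   * already-obtained instance. Only {@code id} is {@code required}; each
   * optional field can be guarded with its {@code has*()} accessor, otherwise
   * the declared default is returned.
   * <pre>{@code
   * // `dn` is an assumed DatanodeInfoProto obtained elsewhere (e.g. from an RPC response).
   * HdfsProtos.DatanodeIDProto id = dn.getId();               // required field
   * long remaining = dn.hasRemaining() ? dn.getRemaining() : 0L;
   * boolean decommissioned =
   *     dn.getAdminState() == HdfsProtos.DatanodeInfoProto.AdminState.DECOMMISSIONED;
   * String rack = dn.hasLocation() ? dn.getLocation() : "";   // optional string location = 8
   * }</pre>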
* * Protobuf type {@code hadoop.hdfs.DatanodeInfoProto} */ public static final class DatanodeInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeInfoProto) DatanodeInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DatanodeInfoProto.newBuilder() to construct. private DatanodeInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DatanodeInfoProto() { location_ = ""; adminState_ = 0; upgradeDomain_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new DatanodeInfoProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class); } /** * Protobuf enum {@code hadoop.hdfs.DatanodeInfoProto.AdminState} */ public enum AdminState implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * NORMAL = 0; */ NORMAL(0), /** * DECOMMISSION_INPROGRESS = 1; */ DECOMMISSION_INPROGRESS(1), /** * DECOMMISSIONED = 2; */ DECOMMISSIONED(2), /** * ENTERING_MAINTENANCE = 3; */ ENTERING_MAINTENANCE(3), /** * IN_MAINTENANCE = 4; */ IN_MAINTENANCE(4), ; /** * NORMAL = 0; */ public static final int NORMAL_VALUE = 0; /** * DECOMMISSION_INPROGRESS = 1; */ public static final int DECOMMISSION_INPROGRESS_VALUE = 1; /** * DECOMMISSIONED = 2; */ public static final int DECOMMISSIONED_VALUE = 2; /** * ENTERING_MAINTENANCE = 3; */ public static final int ENTERING_MAINTENANCE_VALUE = 3; /** * IN_MAINTENANCE = 4; */ public static final int IN_MAINTENANCE_VALUE = 4; public final int getNumber() { return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static AdminState valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. 
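   *
   * <p>Example (illustrative): {@code forNumber} returns {@code null} for
   * values outside the declared range, so callers typically fall back to a
   * known state.
   * <pre>{@code
   * HdfsProtos.DatanodeInfoProto.AdminState s =
   *     HdfsProtos.DatanodeInfoProto.AdminState.forNumber(2);   // DECOMMISSIONED
   * if (s == null) {
   *   s = HdfsProtos.DatanodeInfoProto.AdminState.NORMAL;       // unknown wire value
   * }
   * }</pre>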
*/ public static AdminState forNumber(int value) { switch (value) { case 0: return NORMAL; case 1: return DECOMMISSION_INPROGRESS; case 2: return DECOMMISSIONED; case 3: return ENTERING_MAINTENANCE; case 4: return IN_MAINTENANCE; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< AdminState> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public AdminState findValueByNumber(int number) { return AdminState.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDescriptor().getEnumTypes().get(0); } private static final AdminState[] VALUES = values(); public static AdminState valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private AdminState(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.DatanodeInfoProto.AdminState) } private int bitField0_; public static final int ID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_; /** * required .hadoop.hdfs.DatanodeIDProto id = 1; * @return Whether the id field is set. */ @java.lang.Override public boolean hasId() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; * @return The id. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() { return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : id_; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() { return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : id_; } public static final int CAPACITY_FIELD_NUMBER = 2; private long capacity_ = 0L; /** * optional uint64 capacity = 2 [default = 0]; * @return Whether the capacity field is set. */ @java.lang.Override public boolean hasCapacity() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 capacity = 2 [default = 0]; * @return The capacity. */ @java.lang.Override public long getCapacity() { return capacity_; } public static final int DFSUSED_FIELD_NUMBER = 3; private long dfsUsed_ = 0L; /** * optional uint64 dfsUsed = 3 [default = 0]; * @return Whether the dfsUsed field is set. */ @java.lang.Override public boolean hasDfsUsed() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 dfsUsed = 3 [default = 0]; * @return The dfsUsed. 
*/ @java.lang.Override public long getDfsUsed() { return dfsUsed_; } public static final int REMAINING_FIELD_NUMBER = 4; private long remaining_ = 0L; /** * optional uint64 remaining = 4 [default = 0]; * @return Whether the remaining field is set. */ @java.lang.Override public boolean hasRemaining() { return ((bitField0_ & 0x00000008) != 0); } /** * optional uint64 remaining = 4 [default = 0]; * @return The remaining. */ @java.lang.Override public long getRemaining() { return remaining_; } public static final int BLOCKPOOLUSED_FIELD_NUMBER = 5; private long blockPoolUsed_ = 0L; /** * optional uint64 blockPoolUsed = 5 [default = 0]; * @return Whether the blockPoolUsed field is set. */ @java.lang.Override public boolean hasBlockPoolUsed() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 blockPoolUsed = 5 [default = 0]; * @return The blockPoolUsed. */ @java.lang.Override public long getBlockPoolUsed() { return blockPoolUsed_; } public static final int LASTUPDATE_FIELD_NUMBER = 6; private long lastUpdate_ = 0L; /** * optional uint64 lastUpdate = 6 [default = 0]; * @return Whether the lastUpdate field is set. */ @java.lang.Override public boolean hasLastUpdate() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 lastUpdate = 6 [default = 0]; * @return The lastUpdate. */ @java.lang.Override public long getLastUpdate() { return lastUpdate_; } public static final int XCEIVERCOUNT_FIELD_NUMBER = 7; private int xceiverCount_ = 0; /** * optional uint32 xceiverCount = 7 [default = 0]; * @return Whether the xceiverCount field is set. */ @java.lang.Override public boolean hasXceiverCount() { return ((bitField0_ & 0x00000040) != 0); } /** * optional uint32 xceiverCount = 7 [default = 0]; * @return The xceiverCount. */ @java.lang.Override public int getXceiverCount() { return xceiverCount_; } public static final int LOCATION_FIELD_NUMBER = 8; @SuppressWarnings("serial") private volatile java.lang.Object location_ = ""; /** * optional string location = 8; * @return Whether the location field is set. */ @java.lang.Override public boolean hasLocation() { return ((bitField0_ & 0x00000080) != 0); } /** * optional string location = 8; * @return The location. */ @java.lang.Override public java.lang.String getLocation() { java.lang.Object ref = location_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { location_ = s; } return s; } } /** * optional string location = 8; * @return The bytes for location. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getLocationBytes() { java.lang.Object ref = location_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); location_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int NONDFSUSED_FIELD_NUMBER = 9; private long nonDfsUsed_ = 0L; /** * optional uint64 nonDfsUsed = 9; * @return Whether the nonDfsUsed field is set. */ @java.lang.Override public boolean hasNonDfsUsed() { return ((bitField0_ & 0x00000100) != 0); } /** * optional uint64 nonDfsUsed = 9; * @return The nonDfsUsed. 
*/ @java.lang.Override public long getNonDfsUsed() { return nonDfsUsed_; } public static final int ADMINSTATE_FIELD_NUMBER = 10; private int adminState_ = 0; /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; * @return Whether the adminState field is set. */ @java.lang.Override public boolean hasAdminState() { return ((bitField0_ & 0x00000200) != 0); } /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; * @return The adminState. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.forNumber(adminState_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL : result; } public static final int CACHECAPACITY_FIELD_NUMBER = 11; private long cacheCapacity_ = 0L; /** * optional uint64 cacheCapacity = 11 [default = 0]; * @return Whether the cacheCapacity field is set. */ @java.lang.Override public boolean hasCacheCapacity() { return ((bitField0_ & 0x00000400) != 0); } /** * optional uint64 cacheCapacity = 11 [default = 0]; * @return The cacheCapacity. */ @java.lang.Override public long getCacheCapacity() { return cacheCapacity_; } public static final int CACHEUSED_FIELD_NUMBER = 12; private long cacheUsed_ = 0L; /** * optional uint64 cacheUsed = 12 [default = 0]; * @return Whether the cacheUsed field is set. */ @java.lang.Override public boolean hasCacheUsed() { return ((bitField0_ & 0x00000800) != 0); } /** * optional uint64 cacheUsed = 12 [default = 0]; * @return The cacheUsed. */ @java.lang.Override public long getCacheUsed() { return cacheUsed_; } public static final int LASTUPDATEMONOTONIC_FIELD_NUMBER = 13; private long lastUpdateMonotonic_ = 0L; /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; * @return Whether the lastUpdateMonotonic field is set. */ @java.lang.Override public boolean hasLastUpdateMonotonic() { return ((bitField0_ & 0x00001000) != 0); } /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; * @return The lastUpdateMonotonic. */ @java.lang.Override public long getLastUpdateMonotonic() { return lastUpdateMonotonic_; } public static final int UPGRADEDOMAIN_FIELD_NUMBER = 14; @SuppressWarnings("serial") private volatile java.lang.Object upgradeDomain_ = ""; /** * optional string upgradeDomain = 14; * @return Whether the upgradeDomain field is set. */ @java.lang.Override public boolean hasUpgradeDomain() { return ((bitField0_ & 0x00002000) != 0); } /** * optional string upgradeDomain = 14; * @return The upgradeDomain. */ @java.lang.Override public java.lang.String getUpgradeDomain() { java.lang.Object ref = upgradeDomain_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { upgradeDomain_ = s; } return s; } } /** * optional string upgradeDomain = 14; * @return The bytes for upgradeDomain. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getUpgradeDomainBytes() { java.lang.Object ref = upgradeDomain_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); upgradeDomain_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int LASTBLOCKREPORTTIME_FIELD_NUMBER = 15; private long lastBlockReportTime_ = 0L; /** * optional uint64 lastBlockReportTime = 15 [default = 0]; * @return Whether the lastBlockReportTime field is set. */ @java.lang.Override public boolean hasLastBlockReportTime() { return ((bitField0_ & 0x00004000) != 0); } /** * optional uint64 lastBlockReportTime = 15 [default = 0]; * @return The lastBlockReportTime. */ @java.lang.Override public long getLastBlockReportTime() { return lastBlockReportTime_; } public static final int LASTBLOCKREPORTMONOTONIC_FIELD_NUMBER = 16; private long lastBlockReportMonotonic_ = 0L; /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; * @return Whether the lastBlockReportMonotonic field is set. */ @java.lang.Override public boolean hasLastBlockReportMonotonic() { return ((bitField0_ & 0x00008000) != 0); } /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; * @return The lastBlockReportMonotonic. */ @java.lang.Override public long getLastBlockReportMonotonic() { return lastBlockReportMonotonic_; } public static final int NUMBLOCKS_FIELD_NUMBER = 17; private int numBlocks_ = 0; /** * optional uint32 numBlocks = 17 [default = 0]; * @return Whether the numBlocks field is set. */ @java.lang.Override public boolean hasNumBlocks() { return ((bitField0_ & 0x00010000) != 0); } /** * optional uint32 numBlocks = 17 [default = 0]; * @return The numBlocks. 
*/ @java.lang.Override public int getNumBlocks() { return numBlocks_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasId()) { memoizedIsInitialized = 0; return false; } if (!getId().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getId()); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, capacity_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, dfsUsed_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, remaining_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, blockPoolUsed_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt64(6, lastUpdate_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeUInt32(7, xceiverCount_); } if (((bitField0_ & 0x00000080) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 8, location_); } if (((bitField0_ & 0x00000100) != 0)) { output.writeUInt64(9, nonDfsUsed_); } if (((bitField0_ & 0x00000200) != 0)) { output.writeEnum(10, adminState_); } if (((bitField0_ & 0x00000400) != 0)) { output.writeUInt64(11, cacheCapacity_); } if (((bitField0_ & 0x00000800) != 0)) { output.writeUInt64(12, cacheUsed_); } if (((bitField0_ & 0x00001000) != 0)) { output.writeUInt64(13, lastUpdateMonotonic_); } if (((bitField0_ & 0x00002000) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 14, upgradeDomain_); } if (((bitField0_ & 0x00004000) != 0)) { output.writeUInt64(15, lastBlockReportTime_); } if (((bitField0_ & 0x00008000) != 0)) { output.writeUInt64(16, lastBlockReportMonotonic_); } if (((bitField0_ & 0x00010000) != 0)) { output.writeUInt32(17, numBlocks_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getId()); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, capacity_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, dfsUsed_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, remaining_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, blockPoolUsed_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(6, lastUpdate_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(7, xceiverCount_); } if (((bitField0_ & 0x00000080) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(8, location_); } if (((bitField0_ & 0x00000100) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(9, nonDfsUsed_); } if (((bitField0_ & 0x00000200) != 0)) { size += 
org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(10, adminState_); } if (((bitField0_ & 0x00000400) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(11, cacheCapacity_); } if (((bitField0_ & 0x00000800) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(12, cacheUsed_); } if (((bitField0_ & 0x00001000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(13, lastUpdateMonotonic_); } if (((bitField0_ & 0x00002000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(14, upgradeDomain_); } if (((bitField0_ & 0x00004000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(15, lastBlockReportTime_); } if (((bitField0_ & 0x00008000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(16, lastBlockReportMonotonic_); } if (((bitField0_ & 0x00010000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(17, numBlocks_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) obj; if (hasId() != other.hasId()) return false; if (hasId()) { if (!getId() .equals(other.getId())) return false; } if (hasCapacity() != other.hasCapacity()) return false; if (hasCapacity()) { if (getCapacity() != other.getCapacity()) return false; } if (hasDfsUsed() != other.hasDfsUsed()) return false; if (hasDfsUsed()) { if (getDfsUsed() != other.getDfsUsed()) return false; } if (hasRemaining() != other.hasRemaining()) return false; if (hasRemaining()) { if (getRemaining() != other.getRemaining()) return false; } if (hasBlockPoolUsed() != other.hasBlockPoolUsed()) return false; if (hasBlockPoolUsed()) { if (getBlockPoolUsed() != other.getBlockPoolUsed()) return false; } if (hasLastUpdate() != other.hasLastUpdate()) return false; if (hasLastUpdate()) { if (getLastUpdate() != other.getLastUpdate()) return false; } if (hasXceiverCount() != other.hasXceiverCount()) return false; if (hasXceiverCount()) { if (getXceiverCount() != other.getXceiverCount()) return false; } if (hasLocation() != other.hasLocation()) return false; if (hasLocation()) { if (!getLocation() .equals(other.getLocation())) return false; } if (hasNonDfsUsed() != other.hasNonDfsUsed()) return false; if (hasNonDfsUsed()) { if (getNonDfsUsed() != other.getNonDfsUsed()) return false; } if (hasAdminState() != other.hasAdminState()) return false; if (hasAdminState()) { if (adminState_ != other.adminState_) return false; } if (hasCacheCapacity() != other.hasCacheCapacity()) return false; if (hasCacheCapacity()) { if (getCacheCapacity() != other.getCacheCapacity()) return false; } if (hasCacheUsed() != other.hasCacheUsed()) return false; if (hasCacheUsed()) { if (getCacheUsed() != other.getCacheUsed()) return false; } if (hasLastUpdateMonotonic() != other.hasLastUpdateMonotonic()) return false; if (hasLastUpdateMonotonic()) { if (getLastUpdateMonotonic() != other.getLastUpdateMonotonic()) return false; } if (hasUpgradeDomain() != other.hasUpgradeDomain()) return false; if 
(hasUpgradeDomain()) { if (!getUpgradeDomain() .equals(other.getUpgradeDomain())) return false; } if (hasLastBlockReportTime() != other.hasLastBlockReportTime()) return false; if (hasLastBlockReportTime()) { if (getLastBlockReportTime() != other.getLastBlockReportTime()) return false; } if (hasLastBlockReportMonotonic() != other.hasLastBlockReportMonotonic()) return false; if (hasLastBlockReportMonotonic()) { if (getLastBlockReportMonotonic() != other.getLastBlockReportMonotonic()) return false; } if (hasNumBlocks() != other.hasNumBlocks()) return false; if (hasNumBlocks()) { if (getNumBlocks() != other.getNumBlocks()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasId()) { hash = (37 * hash) + ID_FIELD_NUMBER; hash = (53 * hash) + getId().hashCode(); } if (hasCapacity()) { hash = (37 * hash) + CAPACITY_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getCapacity()); } if (hasDfsUsed()) { hash = (37 * hash) + DFSUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getDfsUsed()); } if (hasRemaining()) { hash = (37 * hash) + REMAINING_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getRemaining()); } if (hasBlockPoolUsed()) { hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getBlockPoolUsed()); } if (hasLastUpdate()) { hash = (37 * hash) + LASTUPDATE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLastUpdate()); } if (hasXceiverCount()) { hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER; hash = (53 * hash) + getXceiverCount(); } if (hasLocation()) { hash = (37 * hash) + LOCATION_FIELD_NUMBER; hash = (53 * hash) + getLocation().hashCode(); } if (hasNonDfsUsed()) { hash = (37 * hash) + NONDFSUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNonDfsUsed()); } if (hasAdminState()) { hash = (37 * hash) + ADMINSTATE_FIELD_NUMBER; hash = (53 * hash) + adminState_; } if (hasCacheCapacity()) { hash = (37 * hash) + CACHECAPACITY_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getCacheCapacity()); } if (hasCacheUsed()) { hash = (37 * hash) + CACHEUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getCacheUsed()); } if (hasLastUpdateMonotonic()) { hash = (37 * hash) + LASTUPDATEMONOTONIC_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLastUpdateMonotonic()); } if (hasUpgradeDomain()) { hash = (37 * hash) + UPGRADEDOMAIN_FIELD_NUMBER; hash = (53 * hash) + getUpgradeDomain().hashCode(); } if (hasLastBlockReportTime()) { hash = (37 * hash) + LASTBLOCKREPORTTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLastBlockReportTime()); } if (hasLastBlockReportMonotonic()) { hash = (37 * hash) + LASTBLOCKREPORTMONOTONIC_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLastBlockReportMonotonic()); } if (hasNumBlocks()) { hash = (37 * hash) + NUMBLOCKS_FIELD_NUMBER; hash = (53 * hash) + getNumBlocks(); } hash = (29 * hash) + getUnknownFields().hashCode(); 
memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * The status of a Datanode
     * 
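     *
     * Usage sketch, illustrative only: the builder declared next is the usual way to
     * assemble a DatanodeInfoProto. The setters referenced here are defined in this
     * class; the DatanodeIDProto setters and all literal values are assumptions made
     * for the example.
     *
     *   HdfsProtos.DatanodeIDProto id = HdfsProtos.DatanodeIDProto.newBuilder()
     *       .setIpAddr("10.0.0.1")             // assumed DatanodeIDProto fields
     *       .setHostName("dn1.example.com")
     *       .setDatanodeUuid("example-uuid")
     *       .setXferPort(9866).setInfoPort(9864).setIpcPort(9867)
     *       .build();
     *   HdfsProtos.DatanodeInfoProto info = HdfsProtos.DatanodeInfoProto.newBuilder()
     *       .setId(id)                         // required .hadoop.hdfs.DatanodeIDProto id = 1
     *       .setCapacity(1024L * 1024L * 1024L)   // optional uint64 capacity = 2
     *       .setAdminState(HdfsProtos.DatanodeInfoProto.AdminState.NORMAL)
     *       .setLocation("/default-rack")      // optional string location = 8
     *       .build();
     *   HdfsProtos.DatanodeInfoProto roundTrip =
     *       HdfsProtos.DatanodeInfoProto.parseFrom(info.toByteArray());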
* * Protobuf type {@code hadoop.hdfs.DatanodeInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getIdFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; id_ = null; if (idBuilder_ != null) { idBuilder_.dispose(); idBuilder_ = null; } capacity_ = 0L; dfsUsed_ = 0L; remaining_ = 0L; blockPoolUsed_ = 0L; lastUpdate_ = 0L; xceiverCount_ = 0; location_ = ""; nonDfsUsed_ = 0L; adminState_ = 0; cacheCapacity_ = 0L; cacheUsed_ = 0L; lastUpdateMonotonic_ = 0L; upgradeDomain_ = ""; lastBlockReportTime_ = 0L; lastBlockReportMonotonic_ = 0L; numBlocks_ = 0; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.id_ = idBuilder_ == null ? 
id_ : idBuilder_.build(); to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.capacity_ = capacity_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.dfsUsed_ = dfsUsed_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.remaining_ = remaining_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.blockPoolUsed_ = blockPoolUsed_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.lastUpdate_ = lastUpdate_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { result.xceiverCount_ = xceiverCount_; to_bitField0_ |= 0x00000040; } if (((from_bitField0_ & 0x00000080) != 0)) { result.location_ = location_; to_bitField0_ |= 0x00000080; } if (((from_bitField0_ & 0x00000100) != 0)) { result.nonDfsUsed_ = nonDfsUsed_; to_bitField0_ |= 0x00000100; } if (((from_bitField0_ & 0x00000200) != 0)) { result.adminState_ = adminState_; to_bitField0_ |= 0x00000200; } if (((from_bitField0_ & 0x00000400) != 0)) { result.cacheCapacity_ = cacheCapacity_; to_bitField0_ |= 0x00000400; } if (((from_bitField0_ & 0x00000800) != 0)) { result.cacheUsed_ = cacheUsed_; to_bitField0_ |= 0x00000800; } if (((from_bitField0_ & 0x00001000) != 0)) { result.lastUpdateMonotonic_ = lastUpdateMonotonic_; to_bitField0_ |= 0x00001000; } if (((from_bitField0_ & 0x00002000) != 0)) { result.upgradeDomain_ = upgradeDomain_; to_bitField0_ |= 0x00002000; } if (((from_bitField0_ & 0x00004000) != 0)) { result.lastBlockReportTime_ = lastBlockReportTime_; to_bitField0_ |= 0x00004000; } if (((from_bitField0_ & 0x00008000) != 0)) { result.lastBlockReportMonotonic_ = lastBlockReportMonotonic_; to_bitField0_ |= 0x00008000; } if (((from_bitField0_ & 0x00010000) != 0)) { result.numBlocks_ = numBlocks_; to_bitField0_ |= 0x00010000; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) return this; if (other.hasId()) { mergeId(other.getId()); } if (other.hasCapacity()) { 
setCapacity(other.getCapacity()); } if (other.hasDfsUsed()) { setDfsUsed(other.getDfsUsed()); } if (other.hasRemaining()) { setRemaining(other.getRemaining()); } if (other.hasBlockPoolUsed()) { setBlockPoolUsed(other.getBlockPoolUsed()); } if (other.hasLastUpdate()) { setLastUpdate(other.getLastUpdate()); } if (other.hasXceiverCount()) { setXceiverCount(other.getXceiverCount()); } if (other.hasLocation()) { location_ = other.location_; bitField0_ |= 0x00000080; onChanged(); } if (other.hasNonDfsUsed()) { setNonDfsUsed(other.getNonDfsUsed()); } if (other.hasAdminState()) { setAdminState(other.getAdminState()); } if (other.hasCacheCapacity()) { setCacheCapacity(other.getCacheCapacity()); } if (other.hasCacheUsed()) { setCacheUsed(other.getCacheUsed()); } if (other.hasLastUpdateMonotonic()) { setLastUpdateMonotonic(other.getLastUpdateMonotonic()); } if (other.hasUpgradeDomain()) { upgradeDomain_ = other.upgradeDomain_; bitField0_ |= 0x00002000; onChanged(); } if (other.hasLastBlockReportTime()) { setLastBlockReportTime(other.getLastBlockReportTime()); } if (other.hasLastBlockReportMonotonic()) { setLastBlockReportMonotonic(other.getLastBlockReportMonotonic()); } if (other.hasNumBlocks()) { setNumBlocks(other.getNumBlocks()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasId()) { return false; } if (!getId().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { input.readMessage( getIdFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000001; break; } // case 10 case 16: { capacity_ = input.readUInt64(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { dfsUsed_ = input.readUInt64(); bitField0_ |= 0x00000004; break; } // case 24 case 32: { remaining_ = input.readUInt64(); bitField0_ |= 0x00000008; break; } // case 32 case 40: { blockPoolUsed_ = input.readUInt64(); bitField0_ |= 0x00000010; break; } // case 40 case 48: { lastUpdate_ = input.readUInt64(); bitField0_ |= 0x00000020; break; } // case 48 case 56: { xceiverCount_ = input.readUInt32(); bitField0_ |= 0x00000040; break; } // case 56 case 66: { location_ = input.readBytes(); bitField0_ |= 0x00000080; break; } // case 66 case 72: { nonDfsUsed_ = input.readUInt64(); bitField0_ |= 0x00000100; break; } // case 72 case 80: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(10, tmpRaw); } else { adminState_ = tmpRaw; bitField0_ |= 0x00000200; } break; } // case 80 case 88: { cacheCapacity_ = input.readUInt64(); bitField0_ |= 0x00000400; break; } // case 88 case 96: { cacheUsed_ = input.readUInt64(); bitField0_ |= 0x00000800; break; } // case 96 case 104: { lastUpdateMonotonic_ = input.readUInt64(); bitField0_ |= 0x00001000; break; } // case 104 case 114: { upgradeDomain_ = input.readBytes(); bitField0_ |= 0x00002000; break; } // case 114 case 120: { lastBlockReportTime_ = input.readUInt64(); bitField0_ 
|= 0x00004000; break; } // case 120 case 128: { lastBlockReportMonotonic_ = input.readUInt64(); bitField0_ |= 0x00008000; break; } // case 128 case 136: { numBlocks_ = input.readUInt32(); bitField0_ |= 0x00010000; break; } // case 136 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> idBuilder_; /** * required .hadoop.hdfs.DatanodeIDProto id = 1; * @return Whether the id field is set. */ public boolean hasId() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; * @return The id. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() { if (idBuilder_ == null) { return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : id_; } else { return idBuilder_.getMessage(); } } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public Builder setId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { if (idBuilder_ == null) { if (value == null) { throw new NullPointerException(); } id_ = value; } else { idBuilder_.setMessage(value); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public Builder setId( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { if (idBuilder_ == null) { id_ = builderForValue.build(); } else { idBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public Builder mergeId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { if (idBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && id_ != null && id_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) { getIdBuilder().mergeFrom(value); } else { id_ = value; } } else { idBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public Builder clearId() { bitField0_ = (bitField0_ & ~0x00000001); id_ = null; if (idBuilder_ != null) { idBuilder_.dispose(); idBuilder_ = null; } onChanged(); return this; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getIdBuilder() { bitField0_ |= 0x00000001; onChanged(); return getIdFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() { if (idBuilder_ != null) { return idBuilder_.getMessageOrBuilder(); } else { return id_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : id_; } } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> getIdFieldBuilder() { if (idBuilder_ == null) { idBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>( getId(), getParentForChildren(), isClean()); id_ = null; } return idBuilder_; } private long capacity_ ; /** * optional uint64 capacity = 2 [default = 0]; * @return Whether the capacity field is set. */ @java.lang.Override public boolean hasCapacity() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 capacity = 2 [default = 0]; * @return The capacity. */ @java.lang.Override public long getCapacity() { return capacity_; } /** * optional uint64 capacity = 2 [default = 0]; * @param value The capacity to set. * @return This builder for chaining. */ public Builder setCapacity(long value) { capacity_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * optional uint64 capacity = 2 [default = 0]; * @return This builder for chaining. */ public Builder clearCapacity() { bitField0_ = (bitField0_ & ~0x00000002); capacity_ = 0L; onChanged(); return this; } private long dfsUsed_ ; /** * optional uint64 dfsUsed = 3 [default = 0]; * @return Whether the dfsUsed field is set. */ @java.lang.Override public boolean hasDfsUsed() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 dfsUsed = 3 [default = 0]; * @return The dfsUsed. */ @java.lang.Override public long getDfsUsed() { return dfsUsed_; } /** * optional uint64 dfsUsed = 3 [default = 0]; * @param value The dfsUsed to set. * @return This builder for chaining. */ public Builder setDfsUsed(long value) { dfsUsed_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * optional uint64 dfsUsed = 3 [default = 0]; * @return This builder for chaining. */ public Builder clearDfsUsed() { bitField0_ = (bitField0_ & ~0x00000004); dfsUsed_ = 0L; onChanged(); return this; } private long remaining_ ; /** * optional uint64 remaining = 4 [default = 0]; * @return Whether the remaining field is set. */ @java.lang.Override public boolean hasRemaining() { return ((bitField0_ & 0x00000008) != 0); } /** * optional uint64 remaining = 4 [default = 0]; * @return The remaining. */ @java.lang.Override public long getRemaining() { return remaining_; } /** * optional uint64 remaining = 4 [default = 0]; * @param value The remaining to set. * @return This builder for chaining. */ public Builder setRemaining(long value) { remaining_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * optional uint64 remaining = 4 [default = 0]; * @return This builder for chaining. */ public Builder clearRemaining() { bitField0_ = (bitField0_ & ~0x00000008); remaining_ = 0L; onChanged(); return this; } private long blockPoolUsed_ ; /** * optional uint64 blockPoolUsed = 5 [default = 0]; * @return Whether the blockPoolUsed field is set. 
*/ @java.lang.Override public boolean hasBlockPoolUsed() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 blockPoolUsed = 5 [default = 0]; * @return The blockPoolUsed. */ @java.lang.Override public long getBlockPoolUsed() { return blockPoolUsed_; } /** * optional uint64 blockPoolUsed = 5 [default = 0]; * @param value The blockPoolUsed to set. * @return This builder for chaining. */ public Builder setBlockPoolUsed(long value) { blockPoolUsed_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional uint64 blockPoolUsed = 5 [default = 0]; * @return This builder for chaining. */ public Builder clearBlockPoolUsed() { bitField0_ = (bitField0_ & ~0x00000010); blockPoolUsed_ = 0L; onChanged(); return this; } private long lastUpdate_ ; /** * optional uint64 lastUpdate = 6 [default = 0]; * @return Whether the lastUpdate field is set. */ @java.lang.Override public boolean hasLastUpdate() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 lastUpdate = 6 [default = 0]; * @return The lastUpdate. */ @java.lang.Override public long getLastUpdate() { return lastUpdate_; } /** * optional uint64 lastUpdate = 6 [default = 0]; * @param value The lastUpdate to set. * @return This builder for chaining. */ public Builder setLastUpdate(long value) { lastUpdate_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } /** * optional uint64 lastUpdate = 6 [default = 0]; * @return This builder for chaining. */ public Builder clearLastUpdate() { bitField0_ = (bitField0_ & ~0x00000020); lastUpdate_ = 0L; onChanged(); return this; } private int xceiverCount_ ; /** * optional uint32 xceiverCount = 7 [default = 0]; * @return Whether the xceiverCount field is set. */ @java.lang.Override public boolean hasXceiverCount() { return ((bitField0_ & 0x00000040) != 0); } /** * optional uint32 xceiverCount = 7 [default = 0]; * @return The xceiverCount. */ @java.lang.Override public int getXceiverCount() { return xceiverCount_; } /** * optional uint32 xceiverCount = 7 [default = 0]; * @param value The xceiverCount to set. * @return This builder for chaining. */ public Builder setXceiverCount(int value) { xceiverCount_ = value; bitField0_ |= 0x00000040; onChanged(); return this; } /** * optional uint32 xceiverCount = 7 [default = 0]; * @return This builder for chaining. */ public Builder clearXceiverCount() { bitField0_ = (bitField0_ & ~0x00000040); xceiverCount_ = 0; onChanged(); return this; } private java.lang.Object location_ = ""; /** * optional string location = 8; * @return Whether the location field is set. */ public boolean hasLocation() { return ((bitField0_ & 0x00000080) != 0); } /** * optional string location = 8; * @return The location. */ public java.lang.String getLocation() { java.lang.Object ref = location_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { location_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string location = 8; * @return The bytes for location. 
*/ public org.apache.hadoop.thirdparty.protobuf.ByteString getLocationBytes() { java.lang.Object ref = location_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); location_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string location = 8; * @param value The location to set. * @return This builder for chaining. */ public Builder setLocation( java.lang.String value) { if (value == null) { throw new NullPointerException(); } location_ = value; bitField0_ |= 0x00000080; onChanged(); return this; } /** * optional string location = 8; * @return This builder for chaining. */ public Builder clearLocation() { location_ = getDefaultInstance().getLocation(); bitField0_ = (bitField0_ & ~0x00000080); onChanged(); return this; } /** * optional string location = 8; * @param value The bytes for location to set. * @return This builder for chaining. */ public Builder setLocationBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } location_ = value; bitField0_ |= 0x00000080; onChanged(); return this; } private long nonDfsUsed_ ; /** * optional uint64 nonDfsUsed = 9; * @return Whether the nonDfsUsed field is set. */ @java.lang.Override public boolean hasNonDfsUsed() { return ((bitField0_ & 0x00000100) != 0); } /** * optional uint64 nonDfsUsed = 9; * @return The nonDfsUsed. */ @java.lang.Override public long getNonDfsUsed() { return nonDfsUsed_; } /** * optional uint64 nonDfsUsed = 9; * @param value The nonDfsUsed to set. * @return This builder for chaining. */ public Builder setNonDfsUsed(long value) { nonDfsUsed_ = value; bitField0_ |= 0x00000100; onChanged(); return this; } /** * optional uint64 nonDfsUsed = 9; * @return This builder for chaining. */ public Builder clearNonDfsUsed() { bitField0_ = (bitField0_ & ~0x00000100); nonDfsUsed_ = 0L; onChanged(); return this; } private int adminState_ = 0; /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; * @return Whether the adminState field is set. */ @java.lang.Override public boolean hasAdminState() { return ((bitField0_ & 0x00000200) != 0); } /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; * @return The adminState. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.forNumber(adminState_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL : result; } /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; * @param value The adminState to set. * @return This builder for chaining. */ public Builder setAdminState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000200; adminState_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; * @return This builder for chaining. 
*/ public Builder clearAdminState() { bitField0_ = (bitField0_ & ~0x00000200); adminState_ = 0; onChanged(); return this; } private long cacheCapacity_ ; /** * optional uint64 cacheCapacity = 11 [default = 0]; * @return Whether the cacheCapacity field is set. */ @java.lang.Override public boolean hasCacheCapacity() { return ((bitField0_ & 0x00000400) != 0); } /** * optional uint64 cacheCapacity = 11 [default = 0]; * @return The cacheCapacity. */ @java.lang.Override public long getCacheCapacity() { return cacheCapacity_; } /** * optional uint64 cacheCapacity = 11 [default = 0]; * @param value The cacheCapacity to set. * @return This builder for chaining. */ public Builder setCacheCapacity(long value) { cacheCapacity_ = value; bitField0_ |= 0x00000400; onChanged(); return this; } /** * optional uint64 cacheCapacity = 11 [default = 0]; * @return This builder for chaining. */ public Builder clearCacheCapacity() { bitField0_ = (bitField0_ & ~0x00000400); cacheCapacity_ = 0L; onChanged(); return this; } private long cacheUsed_ ; /** * optional uint64 cacheUsed = 12 [default = 0]; * @return Whether the cacheUsed field is set. */ @java.lang.Override public boolean hasCacheUsed() { return ((bitField0_ & 0x00000800) != 0); } /** * optional uint64 cacheUsed = 12 [default = 0]; * @return The cacheUsed. */ @java.lang.Override public long getCacheUsed() { return cacheUsed_; } /** * optional uint64 cacheUsed = 12 [default = 0]; * @param value The cacheUsed to set. * @return This builder for chaining. */ public Builder setCacheUsed(long value) { cacheUsed_ = value; bitField0_ |= 0x00000800; onChanged(); return this; } /** * optional uint64 cacheUsed = 12 [default = 0]; * @return This builder for chaining. */ public Builder clearCacheUsed() { bitField0_ = (bitField0_ & ~0x00000800); cacheUsed_ = 0L; onChanged(); return this; } private long lastUpdateMonotonic_ ; /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; * @return Whether the lastUpdateMonotonic field is set. */ @java.lang.Override public boolean hasLastUpdateMonotonic() { return ((bitField0_ & 0x00001000) != 0); } /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; * @return The lastUpdateMonotonic. */ @java.lang.Override public long getLastUpdateMonotonic() { return lastUpdateMonotonic_; } /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; * @param value The lastUpdateMonotonic to set. * @return This builder for chaining. */ public Builder setLastUpdateMonotonic(long value) { lastUpdateMonotonic_ = value; bitField0_ |= 0x00001000; onChanged(); return this; } /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; * @return This builder for chaining. */ public Builder clearLastUpdateMonotonic() { bitField0_ = (bitField0_ & ~0x00001000); lastUpdateMonotonic_ = 0L; onChanged(); return this; } private java.lang.Object upgradeDomain_ = ""; /** * optional string upgradeDomain = 14; * @return Whether the upgradeDomain field is set. */ public boolean hasUpgradeDomain() { return ((bitField0_ & 0x00002000) != 0); } /** * optional string upgradeDomain = 14; * @return The upgradeDomain. 
*/ public java.lang.String getUpgradeDomain() { java.lang.Object ref = upgradeDomain_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { upgradeDomain_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string upgradeDomain = 14; * @return The bytes for upgradeDomain. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getUpgradeDomainBytes() { java.lang.Object ref = upgradeDomain_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); upgradeDomain_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string upgradeDomain = 14; * @param value The upgradeDomain to set. * @return This builder for chaining. */ public Builder setUpgradeDomain( java.lang.String value) { if (value == null) { throw new NullPointerException(); } upgradeDomain_ = value; bitField0_ |= 0x00002000; onChanged(); return this; } /** * optional string upgradeDomain = 14; * @return This builder for chaining. */ public Builder clearUpgradeDomain() { upgradeDomain_ = getDefaultInstance().getUpgradeDomain(); bitField0_ = (bitField0_ & ~0x00002000); onChanged(); return this; } /** * optional string upgradeDomain = 14; * @param value The bytes for upgradeDomain to set. * @return This builder for chaining. */ public Builder setUpgradeDomainBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } upgradeDomain_ = value; bitField0_ |= 0x00002000; onChanged(); return this; } private long lastBlockReportTime_ ; /** * optional uint64 lastBlockReportTime = 15 [default = 0]; * @return Whether the lastBlockReportTime field is set. */ @java.lang.Override public boolean hasLastBlockReportTime() { return ((bitField0_ & 0x00004000) != 0); } /** * optional uint64 lastBlockReportTime = 15 [default = 0]; * @return The lastBlockReportTime. */ @java.lang.Override public long getLastBlockReportTime() { return lastBlockReportTime_; } /** * optional uint64 lastBlockReportTime = 15 [default = 0]; * @param value The lastBlockReportTime to set. * @return This builder for chaining. */ public Builder setLastBlockReportTime(long value) { lastBlockReportTime_ = value; bitField0_ |= 0x00004000; onChanged(); return this; } /** * optional uint64 lastBlockReportTime = 15 [default = 0]; * @return This builder for chaining. */ public Builder clearLastBlockReportTime() { bitField0_ = (bitField0_ & ~0x00004000); lastBlockReportTime_ = 0L; onChanged(); return this; } private long lastBlockReportMonotonic_ ; /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; * @return Whether the lastBlockReportMonotonic field is set. */ @java.lang.Override public boolean hasLastBlockReportMonotonic() { return ((bitField0_ & 0x00008000) != 0); } /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; * @return The lastBlockReportMonotonic. */ @java.lang.Override public long getLastBlockReportMonotonic() { return lastBlockReportMonotonic_; } /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; * @param value The lastBlockReportMonotonic to set. * @return This builder for chaining. 
*/ public Builder setLastBlockReportMonotonic(long value) { lastBlockReportMonotonic_ = value; bitField0_ |= 0x00008000; onChanged(); return this; } /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; * @return This builder for chaining. */ public Builder clearLastBlockReportMonotonic() { bitField0_ = (bitField0_ & ~0x00008000); lastBlockReportMonotonic_ = 0L; onChanged(); return this; } private int numBlocks_ ; /** * optional uint32 numBlocks = 17 [default = 0]; * @return Whether the numBlocks field is set. */ @java.lang.Override public boolean hasNumBlocks() { return ((bitField0_ & 0x00010000) != 0); } /** * optional uint32 numBlocks = 17 [default = 0]; * @return The numBlocks. */ @java.lang.Override public int getNumBlocks() { return numBlocks_; } /** * optional uint32 numBlocks = 17 [default = 0]; * @param value The numBlocks to set. * @return This builder for chaining. */ public Builder setNumBlocks(int value) { numBlocks_ = value; bitField0_ |= 0x00010000; onChanged(); return this; } /** * optional uint32 numBlocks = 17 [default = 0]; * @return This builder for chaining. */ public Builder clearNumBlocks() { bitField0_ = (bitField0_ & ~0x00010000); numBlocks_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DatanodeInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DatanodeStorageProtoOrBuilder extends // 
@@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeStorageProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string storageUuid = 1; * @return Whether the storageUuid field is set. */ boolean hasStorageUuid(); /** * required string storageUuid = 1; * @return The storageUuid. */ java.lang.String getStorageUuid(); /** * required string storageUuid = 1; * @return The bytes for storageUuid. */ org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidBytes(); /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; * @return Whether the state field is set. */ boolean hasState(); /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; * @return The state. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState(); /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; * @return Whether the storageType field is set. */ boolean hasStorageType(); /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; * @return The storageType. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType(); } /** *
   **
   * Represents a storage available on the datanode
   * 
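   *
   * Usage sketch, illustrative only: the generated builder and parse methods are the
   * intended entry points. Method, enum, and field names are taken from this class;
   * the UUID literal is an assumption for the example.
   *
   *   HdfsProtos.DatanodeStorageProto storage = HdfsProtos.DatanodeStorageProto.newBuilder()
   *       .setStorageUuid("DS-example-uuid")   // required string storageUuid = 1
   *       .setState(HdfsProtos.DatanodeStorageProto.StorageState.READ_ONLY_SHARED)
   *       .setStorageType(HdfsProtos.StorageTypeProto.ARCHIVE)
   *       .build();                            // build() throws if storageUuid is left unset
   *   byte[] wire = storage.toByteArray();
   *   HdfsProtos.DatanodeStorageProto parsed =
   *       HdfsProtos.DatanodeStorageProto.parseFrom(wire);   // declares InvalidProtocolBufferException
   *   // state and storageType default to NORMAL and DISK when the fields are absent.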
* * Protobuf type {@code hadoop.hdfs.DatanodeStorageProto} */ public static final class DatanodeStorageProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeStorageProto) DatanodeStorageProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DatanodeStorageProto.newBuilder() to construct. private DatanodeStorageProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DatanodeStorageProto() { storageUuid_ = ""; state_ = 0; storageType_ = 1; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new DatanodeStorageProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder.class); } /** * Protobuf enum {@code hadoop.hdfs.DatanodeStorageProto.StorageState} */ public enum StorageState implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * NORMAL = 0; */ NORMAL(0), /** * READ_ONLY_SHARED = 1; */ READ_ONLY_SHARED(1), ; /** * NORMAL = 0; */ public static final int NORMAL_VALUE = 0; /** * READ_ONLY_SHARED = 1; */ public static final int READ_ONLY_SHARED_VALUE = 1; public final int getNumber() { return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static StorageState valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. 
*/ public static StorageState forNumber(int value) { switch (value) { case 0: return NORMAL; case 1: return READ_ONLY_SHARED; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< StorageState> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public StorageState findValueByNumber(int number) { return StorageState.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDescriptor().getEnumTypes().get(0); } private static final StorageState[] VALUES = values(); public static StorageState valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private StorageState(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.DatanodeStorageProto.StorageState) } private int bitField0_; public static final int STORAGEUUID_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object storageUuid_ = ""; /** * required string storageUuid = 1; * @return Whether the storageUuid field is set. */ @java.lang.Override public boolean hasStorageUuid() { return ((bitField0_ & 0x00000001) != 0); } /** * required string storageUuid = 1; * @return The storageUuid. */ @java.lang.Override public java.lang.String getStorageUuid() { java.lang.Object ref = storageUuid_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { storageUuid_ = s; } return s; } } /** * required string storageUuid = 1; * @return The bytes for storageUuid. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidBytes() { java.lang.Object ref = storageUuid_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); storageUuid_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int STATE_FIELD_NUMBER = 2; private int state_ = 0; /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; * @return Whether the state field is set. */ @java.lang.Override public boolean hasState() { return ((bitField0_ & 0x00000002) != 0); } /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; * @return The state. 
*/ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.forNumber(state_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL : result; } public static final int STORAGETYPE_FIELD_NUMBER = 3; private int storageType_ = 1; /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; * @return Whether the storageType field is set. */ @java.lang.Override public boolean hasStorageType() { return ((bitField0_ & 0x00000004) != 0); } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; * @return The storageType. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasStorageUuid()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, storageUuid_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeEnum(2, state_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeEnum(3, storageType_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, storageUuid_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(2, state_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(3, storageType_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) obj; if (hasStorageUuid() != other.hasStorageUuid()) return false; if (hasStorageUuid()) { if (!getStorageUuid() .equals(other.getStorageUuid())) return false; } if (hasState() != other.hasState()) return false; if (hasState()) { if (state_ != other.state_) return false; } if (hasStorageType() != other.hasStorageType()) return false; if (hasStorageType()) { if (storageType_ != other.storageType_) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } 
@java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasStorageUuid()) { hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER; hash = (53 * hash) + getStorageUuid().hashCode(); } if (hasState()) { hash = (37 * hash) + STATE_FIELD_NUMBER; hash = (53 * hash) + state_; } if (hasStorageType()) { hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER; hash = (53 * hash) + storageType_; } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public 
static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Represents a storage available on the datanode
     * 
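     *
     * Merge-behaviour sketch, illustrative only: mergeFrom(other) in this builder copies
     * only the fields that are set on the argument, and the stream parser stores an
     * unrecognised StorageState or StorageTypeProto number as an unknown field rather
     * than failing. The literal values below are assumptions for the example.
     *
     *   HdfsProtos.DatanodeStorageProto.Builder b =
     *       HdfsProtos.DatanodeStorageProto.newBuilder().setStorageUuid("DS-1");
     *   b.mergeFrom(HdfsProtos.DatanodeStorageProto.newBuilder()
     *       .setStorageUuid("DS-2")
     *       .buildPartial());                  // buildPartial() skips the required-field check
     *   // b.getStorageUuid() is now "DS-2"; fields unset on the argument are untouched.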
* * Protobuf type {@code hadoop.hdfs.DatanodeStorageProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeStorageProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; storageUuid_ = ""; state_ = 0; storageType_ = 1; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.storageUuid_ = storageUuid_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.state_ = state_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.storageType_ = storageType_; to_bitField0_ |= 0x00000004; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) return this; if (other.hasStorageUuid()) { storageUuid_ = other.storageUuid_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasState()) { setState(other.getState()); } if (other.hasStorageType()) { setStorageType(other.getStorageType()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasStorageUuid()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { storageUuid_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 16: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(2, tmpRaw); } else { state_ = tmpRaw; bitField0_ |= 0x00000002; } break; } // case 16 case 24: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(3, tmpRaw); } else { storageType_ = tmpRaw; bitField0_ |= 0x00000004; } break; } // case 24 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object storageUuid_ = ""; /** * required string storageUuid = 1; * @return Whether the storageUuid field is set. */ public boolean hasStorageUuid() { return ((bitField0_ & 0x00000001) != 0); } /** * required string storageUuid = 1; * @return The storageUuid. 
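       * <p>Editorial note, not part of the generated Javadoc: as the method body below shows, the
       * field holds either a decoded String or the raw ByteString read off the wire; the first call
       * to this getter decodes the bytes as UTF-8 and, when the bytes are valid UTF-8, caches the
       * decoded String back into the field.</p>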
*/ public java.lang.String getStorageUuid() { java.lang.Object ref = storageUuid_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { storageUuid_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string storageUuid = 1; * @return The bytes for storageUuid. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidBytes() { java.lang.Object ref = storageUuid_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); storageUuid_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string storageUuid = 1; * @param value The storageUuid to set. * @return This builder for chaining. */ public Builder setStorageUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } storageUuid_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required string storageUuid = 1; * @return This builder for chaining. */ public Builder clearStorageUuid() { storageUuid_ = getDefaultInstance().getStorageUuid(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * required string storageUuid = 1; * @param value The bytes for storageUuid to set. * @return This builder for chaining. */ public Builder setStorageUuidBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } storageUuid_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private int state_ = 0; /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; * @return Whether the state field is set. */ @java.lang.Override public boolean hasState() { return ((bitField0_ & 0x00000002) != 0); } /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; * @return The state. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.forNumber(state_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL : result; } /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; * @param value The state to set. * @return This builder for chaining. */ public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; state_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; * @return This builder for chaining. */ public Builder clearState() { bitField0_ = (bitField0_ & ~0x00000002); state_ = 0; onChanged(); return this; } private int storageType_ = 1; /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; * @return Whether the storageType field is set. 
*/ @java.lang.Override public boolean hasStorageType() { return ((bitField0_ & 0x00000004) != 0); } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; * @return The storageType. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; * @param value The storageType to set. * @return This builder for chaining. */ public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; storageType_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; * @return This builder for chaining. */ public Builder clearStorageType() { bitField0_ = (bitField0_ & ~0x00000004); storageType_ = 1; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeStorageProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeStorageProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DatanodeStorageProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface StorageReportProtoOrBuilder 
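/*
 * Illustrative sketch, not part of the generated file: the DatanodeStorageProto message defined
 * above is normally assembled through its Builder. The setters and the defaults (state = NORMAL,
 * storageType = DISK) come from the generated code; the UUID literal below is a made-up placeholder.
 *
 *   HdfsProtos.DatanodeStorageProto storage =
 *       HdfsProtos.DatanodeStorageProto.newBuilder()
 *           .setStorageUuid("DS-example-uuid")   // required: build() throws if it is unset
 *           .setState(HdfsProtos.DatanodeStorageProto.StorageState.NORMAL)
 *           .setStorageType(HdfsProtos.StorageTypeProto.SSD)
 *           .build();
 */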
extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageReportProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string storageUuid = 1 [deprecated = true]; * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated. * See hdfs.proto;l=152 * @return Whether the storageUuid field is set. */ @java.lang.Deprecated boolean hasStorageUuid(); /** * required string storageUuid = 1 [deprecated = true]; * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated. * See hdfs.proto;l=152 * @return The storageUuid. */ @java.lang.Deprecated java.lang.String getStorageUuid(); /** * required string storageUuid = 1 [deprecated = true]; * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated. * See hdfs.proto;l=152 * @return The bytes for storageUuid. */ @java.lang.Deprecated org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidBytes(); /** * optional bool failed = 2 [default = false]; * @return Whether the failed field is set. */ boolean hasFailed(); /** * optional bool failed = 2 [default = false]; * @return The failed. */ boolean getFailed(); /** * optional uint64 capacity = 3 [default = 0]; * @return Whether the capacity field is set. */ boolean hasCapacity(); /** * optional uint64 capacity = 3 [default = 0]; * @return The capacity. */ long getCapacity(); /** * optional uint64 dfsUsed = 4 [default = 0]; * @return Whether the dfsUsed field is set. */ boolean hasDfsUsed(); /** * optional uint64 dfsUsed = 4 [default = 0]; * @return The dfsUsed. */ long getDfsUsed(); /** * optional uint64 remaining = 5 [default = 0]; * @return Whether the remaining field is set. */ boolean hasRemaining(); /** * optional uint64 remaining = 5 [default = 0]; * @return The remaining. */ long getRemaining(); /** * optional uint64 blockPoolUsed = 6 [default = 0]; * @return Whether the blockPoolUsed field is set. */ boolean hasBlockPoolUsed(); /** * optional uint64 blockPoolUsed = 6 [default = 0]; * @return The blockPoolUsed. */ long getBlockPoolUsed(); /** *
     * <pre>
     * supersedes StorageUuid
     * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * @return Whether the storage field is set. */ boolean hasStorage(); /** *
     * <pre>
     * supersedes StorageUuid
     * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * @return The storage. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage(); /** *
     * <pre>
     * supersedes StorageUuid
     * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder(); /** * optional uint64 nonDfsUsed = 8; * @return Whether the nonDfsUsed field is set. */ boolean hasNonDfsUsed(); /** * optional uint64 nonDfsUsed = 8; * @return The nonDfsUsed. */ long getNonDfsUsed(); /** * optional string mount = 9; * @return Whether the mount field is set. */ boolean hasMount(); /** * optional string mount = 9; * @return The mount. */ java.lang.String getMount(); /** * optional string mount = 9; * @return The bytes for mount. */ org.apache.hadoop.thirdparty.protobuf.ByteString getMountBytes(); } /** * Protobuf type {@code hadoop.hdfs.StorageReportProto} */ public static final class StorageReportProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageReportProto) StorageReportProtoOrBuilder { private static final long serialVersionUID = 0L; // Use StorageReportProto.newBuilder() to construct. private StorageReportProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private StorageReportProto() { storageUuid_ = ""; mount_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new StorageReportProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder.class); } private int bitField0_; public static final int STORAGEUUID_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object storageUuid_ = ""; /** * required string storageUuid = 1 [deprecated = true]; * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated. * See hdfs.proto;l=152 * @return Whether the storageUuid field is set. */ @java.lang.Override @java.lang.Deprecated public boolean hasStorageUuid() { return ((bitField0_ & 0x00000001) != 0); } /** * required string storageUuid = 1 [deprecated = true]; * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated. * See hdfs.proto;l=152 * @return The storageUuid. */ @java.lang.Override @java.lang.Deprecated public java.lang.String getStorageUuid() { java.lang.Object ref = storageUuid_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { storageUuid_ = s; } return s; } } /** * required string storageUuid = 1 [deprecated = true]; * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated. * See hdfs.proto;l=152 * @return The bytes for storageUuid. 
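     * <p>Editorial note, not part of the generated Javadoc: the field is marked deprecated and,
     * per the comment on field 7 below ("supersedes StorageUuid"), the same UUID is carried by the
     * nested storage message, so a reader would typically prefer
     * {@code report.hasStorage() ? report.getStorage().getStorageUuid() : report.getStorageUuid()}.</p>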
*/ @java.lang.Override @java.lang.Deprecated public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidBytes() { java.lang.Object ref = storageUuid_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); storageUuid_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int FAILED_FIELD_NUMBER = 2; private boolean failed_ = false; /** * optional bool failed = 2 [default = false]; * @return Whether the failed field is set. */ @java.lang.Override public boolean hasFailed() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bool failed = 2 [default = false]; * @return The failed. */ @java.lang.Override public boolean getFailed() { return failed_; } public static final int CAPACITY_FIELD_NUMBER = 3; private long capacity_ = 0L; /** * optional uint64 capacity = 3 [default = 0]; * @return Whether the capacity field is set. */ @java.lang.Override public boolean hasCapacity() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 capacity = 3 [default = 0]; * @return The capacity. */ @java.lang.Override public long getCapacity() { return capacity_; } public static final int DFSUSED_FIELD_NUMBER = 4; private long dfsUsed_ = 0L; /** * optional uint64 dfsUsed = 4 [default = 0]; * @return Whether the dfsUsed field is set. */ @java.lang.Override public boolean hasDfsUsed() { return ((bitField0_ & 0x00000008) != 0); } /** * optional uint64 dfsUsed = 4 [default = 0]; * @return The dfsUsed. */ @java.lang.Override public long getDfsUsed() { return dfsUsed_; } public static final int REMAINING_FIELD_NUMBER = 5; private long remaining_ = 0L; /** * optional uint64 remaining = 5 [default = 0]; * @return Whether the remaining field is set. */ @java.lang.Override public boolean hasRemaining() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 remaining = 5 [default = 0]; * @return The remaining. */ @java.lang.Override public long getRemaining() { return remaining_; } public static final int BLOCKPOOLUSED_FIELD_NUMBER = 6; private long blockPoolUsed_ = 0L; /** * optional uint64 blockPoolUsed = 6 [default = 0]; * @return Whether the blockPoolUsed field is set. */ @java.lang.Override public boolean hasBlockPoolUsed() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 blockPoolUsed = 6 [default = 0]; * @return The blockPoolUsed. */ @java.lang.Override public long getBlockPoolUsed() { return blockPoolUsed_; } public static final int STORAGE_FIELD_NUMBER = 7; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_; /** *
     * <pre>
     * supersedes StorageUuid
     * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * @return Whether the storage field is set. */ @java.lang.Override public boolean hasStorage() { return ((bitField0_ & 0x00000040) != 0); } /** *
     * <pre>
     * supersedes StorageUuid
     * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * @return The storage. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() { return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_; } /** *
     * <pre>
     * supersedes StorageUuid
     * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() { return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_; } public static final int NONDFSUSED_FIELD_NUMBER = 8; private long nonDfsUsed_ = 0L; /** * optional uint64 nonDfsUsed = 8; * @return Whether the nonDfsUsed field is set. */ @java.lang.Override public boolean hasNonDfsUsed() { return ((bitField0_ & 0x00000080) != 0); } /** * optional uint64 nonDfsUsed = 8; * @return The nonDfsUsed. */ @java.lang.Override public long getNonDfsUsed() { return nonDfsUsed_; } public static final int MOUNT_FIELD_NUMBER = 9; @SuppressWarnings("serial") private volatile java.lang.Object mount_ = ""; /** * optional string mount = 9; * @return Whether the mount field is set. */ @java.lang.Override public boolean hasMount() { return ((bitField0_ & 0x00000100) != 0); } /** * optional string mount = 9; * @return The mount. */ @java.lang.Override public java.lang.String getMount() { java.lang.Object ref = mount_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { mount_ = s; } return s; } } /** * optional string mount = 9; * @return The bytes for mount. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getMountBytes() { java.lang.Object ref = mount_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); mount_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasStorageUuid()) { memoizedIsInitialized = 0; return false; } if (hasStorage()) { if (!getStorage().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, storageUuid_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBool(2, failed_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, capacity_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, dfsUsed_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, remaining_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt64(6, blockPoolUsed_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeMessage(7, getStorage()); } if (((bitField0_ & 0x00000080) != 0)) { output.writeUInt64(8, nonDfsUsed_); } if (((bitField0_ & 0x00000100) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 9, mount_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, storageUuid_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(2, failed_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, capacity_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, dfsUsed_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, remaining_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(6, blockPoolUsed_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(7, getStorage()); } if (((bitField0_ & 0x00000080) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(8, nonDfsUsed_); } if (((bitField0_ & 0x00000100) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(9, mount_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) obj; if (hasStorageUuid() != other.hasStorageUuid()) return false; if (hasStorageUuid()) { if (!getStorageUuid() .equals(other.getStorageUuid())) return false; } if (hasFailed() != other.hasFailed()) return false; if (hasFailed()) { if (getFailed() != other.getFailed()) return false; } if (hasCapacity() != other.hasCapacity()) return false; if (hasCapacity()) { if (getCapacity() != other.getCapacity()) return false; } if (hasDfsUsed() != other.hasDfsUsed()) return false; if (hasDfsUsed()) { if (getDfsUsed() != other.getDfsUsed()) return false; } if (hasRemaining() != other.hasRemaining()) return false; if (hasRemaining()) { if (getRemaining() != other.getRemaining()) return false; } if (hasBlockPoolUsed() != other.hasBlockPoolUsed()) return false; if (hasBlockPoolUsed()) { if (getBlockPoolUsed() != other.getBlockPoolUsed()) return false; } if (hasStorage() != other.hasStorage()) return false; if (hasStorage()) { if (!getStorage() .equals(other.getStorage())) return false; } if (hasNonDfsUsed() != other.hasNonDfsUsed()) return false; if (hasNonDfsUsed()) { if (getNonDfsUsed() != other.getNonDfsUsed()) return false; } if (hasMount() != other.hasMount()) return false; if (hasMount()) { if (!getMount() .equals(other.getMount())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasStorageUuid()) { hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER; hash = (53 * hash) + getStorageUuid().hashCode(); } if (hasFailed()) { hash = (37 * hash) + FAILED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getFailed()); } if (hasCapacity()) { hash = (37 * hash) + CAPACITY_FIELD_NUMBER; hash = (53 * hash) + 
org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getCapacity()); } if (hasDfsUsed()) { hash = (37 * hash) + DFSUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getDfsUsed()); } if (hasRemaining()) { hash = (37 * hash) + REMAINING_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getRemaining()); } if (hasBlockPoolUsed()) { hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getBlockPoolUsed()); } if (hasStorage()) { hash = (37 * hash) + STORAGE_FIELD_NUMBER; hash = (53 * hash) + getStorage().hashCode(); } if (hasNonDfsUsed()) { hash = (37 * hash) + NONDFSUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNonDfsUsed()); } if (hasMount()) { hash = (37 * hash) + MOUNT_FIELD_NUMBER; hash = (53 * hash) + getMount().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return 
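/*
 * Illustrative sketch, not generated code: the static parseFrom/parseDelimitedFrom methods round-trip
 * with the message's own serialized form. Assuming a built StorageReportProto named report, the
 * byte[] overload shown nearby could be exercised like this (toByteArray() is inherited from the
 * shaded protobuf message base class):
 *
 *   byte[] bytes = report.toByteArray();
 *   HdfsProtos.StorageReportProto copy =
 *       HdfsProtos.StorageReportProto.parseFrom(bytes);   // declared to throw InvalidProtocolBufferException
 */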
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.StorageReportProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageReportProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getStorageFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; storageUuid_ = ""; failed_ = false; capacity_ = 0L; dfsUsed_ = 0L; remaining_ = 0L; blockPoolUsed_ = 0L; storage_ = null; if (storageBuilder_ != null) { storageBuilder_.dispose(); 
storageBuilder_ = null; } nonDfsUsed_ = 0L; mount_ = ""; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.storageUuid_ = storageUuid_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.failed_ = failed_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.capacity_ = capacity_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.dfsUsed_ = dfsUsed_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.remaining_ = remaining_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.blockPoolUsed_ = blockPoolUsed_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { result.storage_ = storageBuilder_ == null ? 
storage_ : storageBuilder_.build(); to_bitField0_ |= 0x00000040; } if (((from_bitField0_ & 0x00000080) != 0)) { result.nonDfsUsed_ = nonDfsUsed_; to_bitField0_ |= 0x00000080; } if (((from_bitField0_ & 0x00000100) != 0)) { result.mount_ = mount_; to_bitField0_ |= 0x00000100; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance()) return this; if (other.hasStorageUuid()) { storageUuid_ = other.storageUuid_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasFailed()) { setFailed(other.getFailed()); } if (other.hasCapacity()) { setCapacity(other.getCapacity()); } if (other.hasDfsUsed()) { setDfsUsed(other.getDfsUsed()); } if (other.hasRemaining()) { setRemaining(other.getRemaining()); } if (other.hasBlockPoolUsed()) { setBlockPoolUsed(other.getBlockPoolUsed()); } if (other.hasStorage()) { mergeStorage(other.getStorage()); } if (other.hasNonDfsUsed()) { setNonDfsUsed(other.getNonDfsUsed()); } if (other.hasMount()) { mount_ = other.mount_; bitField0_ |= 0x00000100; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasStorageUuid()) { return false; } if (hasStorage()) { if (!getStorage().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { storageUuid_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 16: { failed_ = input.readBool(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { capacity_ = input.readUInt64(); bitField0_ |= 0x00000004; break; } // case 24 case 32: { dfsUsed_ = input.readUInt64(); bitField0_ |= 0x00000008; break; } // case 32 case 40: { remaining_ = 
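/*
 * Annotation, not generated: the case labels in this merge loop are protobuf wire tags, i.e.
 * (field_number << 3) | wire_type. For example, case 40 is field 5 (remaining, varint),
 * case 58 is field 7 (storage, length-delimited message) and case 74 is field 9 (mount,
 * length-delimited string); a tag of 0 means the end of the input stream.
 */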
input.readUInt64(); bitField0_ |= 0x00000010; break; } // case 40 case 48: { blockPoolUsed_ = input.readUInt64(); bitField0_ |= 0x00000020; break; } // case 48 case 58: { input.readMessage( getStorageFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000040; break; } // case 58 case 64: { nonDfsUsed_ = input.readUInt64(); bitField0_ |= 0x00000080; break; } // case 64 case 74: { mount_ = input.readBytes(); bitField0_ |= 0x00000100; break; } // case 74 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object storageUuid_ = ""; /** * required string storageUuid = 1 [deprecated = true]; * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated. * See hdfs.proto;l=152 * @return Whether the storageUuid field is set. */ @java.lang.Deprecated public boolean hasStorageUuid() { return ((bitField0_ & 0x00000001) != 0); } /** * required string storageUuid = 1 [deprecated = true]; * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated. * See hdfs.proto;l=152 * @return The storageUuid. */ @java.lang.Deprecated public java.lang.String getStorageUuid() { java.lang.Object ref = storageUuid_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { storageUuid_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string storageUuid = 1 [deprecated = true]; * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated. * See hdfs.proto;l=152 * @return The bytes for storageUuid. */ @java.lang.Deprecated public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidBytes() { java.lang.Object ref = storageUuid_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); storageUuid_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string storageUuid = 1 [deprecated = true]; * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated. * See hdfs.proto;l=152 * @param value The storageUuid to set. * @return This builder for chaining. */ @java.lang.Deprecated public Builder setStorageUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } storageUuid_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required string storageUuid = 1 [deprecated = true]; * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated. * See hdfs.proto;l=152 * @return This builder for chaining. */ @java.lang.Deprecated public Builder clearStorageUuid() { storageUuid_ = getDefaultInstance().getStorageUuid(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * required string storageUuid = 1 [deprecated = true]; * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated. * See hdfs.proto;l=152 * @param value The bytes for storageUuid to set. * @return This builder for chaining. 
*/ @java.lang.Deprecated public Builder setStorageUuidBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } storageUuid_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private boolean failed_ ; /** * optional bool failed = 2 [default = false]; * @return Whether the failed field is set. */ @java.lang.Override public boolean hasFailed() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bool failed = 2 [default = false]; * @return The failed. */ @java.lang.Override public boolean getFailed() { return failed_; } /** * optional bool failed = 2 [default = false]; * @param value The failed to set. * @return This builder for chaining. */ public Builder setFailed(boolean value) { failed_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * optional bool failed = 2 [default = false]; * @return This builder for chaining. */ public Builder clearFailed() { bitField0_ = (bitField0_ & ~0x00000002); failed_ = false; onChanged(); return this; } private long capacity_ ; /** * optional uint64 capacity = 3 [default = 0]; * @return Whether the capacity field is set. */ @java.lang.Override public boolean hasCapacity() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 capacity = 3 [default = 0]; * @return The capacity. */ @java.lang.Override public long getCapacity() { return capacity_; } /** * optional uint64 capacity = 3 [default = 0]; * @param value The capacity to set. * @return This builder for chaining. */ public Builder setCapacity(long value) { capacity_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * optional uint64 capacity = 3 [default = 0]; * @return This builder for chaining. */ public Builder clearCapacity() { bitField0_ = (bitField0_ & ~0x00000004); capacity_ = 0L; onChanged(); return this; } private long dfsUsed_ ; /** * optional uint64 dfsUsed = 4 [default = 0]; * @return Whether the dfsUsed field is set. */ @java.lang.Override public boolean hasDfsUsed() { return ((bitField0_ & 0x00000008) != 0); } /** * optional uint64 dfsUsed = 4 [default = 0]; * @return The dfsUsed. */ @java.lang.Override public long getDfsUsed() { return dfsUsed_; } /** * optional uint64 dfsUsed = 4 [default = 0]; * @param value The dfsUsed to set. * @return This builder for chaining. */ public Builder setDfsUsed(long value) { dfsUsed_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * optional uint64 dfsUsed = 4 [default = 0]; * @return This builder for chaining. */ public Builder clearDfsUsed() { bitField0_ = (bitField0_ & ~0x00000008); dfsUsed_ = 0L; onChanged(); return this; } private long remaining_ ; /** * optional uint64 remaining = 5 [default = 0]; * @return Whether the remaining field is set. */ @java.lang.Override public boolean hasRemaining() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 remaining = 5 [default = 0]; * @return The remaining. */ @java.lang.Override public long getRemaining() { return remaining_; } /** * optional uint64 remaining = 5 [default = 0]; * @param value The remaining to set. * @return This builder for chaining. */ public Builder setRemaining(long value) { remaining_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional uint64 remaining = 5 [default = 0]; * @return This builder for chaining. 
*/ public Builder clearRemaining() { bitField0_ = (bitField0_ & ~0x00000010); remaining_ = 0L; onChanged(); return this; } private long blockPoolUsed_ ; /** * optional uint64 blockPoolUsed = 6 [default = 0]; * @return Whether the blockPoolUsed field is set. */ @java.lang.Override public boolean hasBlockPoolUsed() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 blockPoolUsed = 6 [default = 0]; * @return The blockPoolUsed. */ @java.lang.Override public long getBlockPoolUsed() { return blockPoolUsed_; } /** * optional uint64 blockPoolUsed = 6 [default = 0]; * @param value The blockPoolUsed to set. * @return This builder for chaining. */ public Builder setBlockPoolUsed(long value) { blockPoolUsed_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } /** * optional uint64 blockPoolUsed = 6 [default = 0]; * @return This builder for chaining. */ public Builder clearBlockPoolUsed() { bitField0_ = (bitField0_ & ~0x00000020); blockPoolUsed_ = 0L; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> storageBuilder_; /** *
       * <pre>
       * supersedes StorageUuid
       * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * @return Whether the storage field is set. */ public boolean hasStorage() { return ((bitField0_ & 0x00000040) != 0); } /** *
       * <pre>
       * supersedes StorageUuid
       * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * @return The storage. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() { if (storageBuilder_ == null) { return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_; } else { return storageBuilder_.getMessage(); } } /** *
       * <pre>
       * supersedes StorageUuid
       * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) { if (storageBuilder_ == null) { if (value == null) { throw new NullPointerException(); } storage_ = value; } else { storageBuilder_.setMessage(value); } bitField0_ |= 0x00000040; onChanged(); return this; } /** *
       * <pre>
       * supersedes StorageUuid
       * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public Builder setStorage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder builderForValue) { if (storageBuilder_ == null) { storage_ = builderForValue.build(); } else { storageBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000040; onChanged(); return this; } /** *
       * <pre>
       * supersedes StorageUuid
       * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) { if (storageBuilder_ == null) { if (((bitField0_ & 0x00000040) != 0) && storage_ != null && storage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) { getStorageBuilder().mergeFrom(value); } else { storage_ = value; } } else { storageBuilder_.mergeFrom(value); } bitField0_ |= 0x00000040; onChanged(); return this; } /** *
       * <pre>
       * supersedes StorageUuid
       * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public Builder clearStorage() { bitField0_ = (bitField0_ & ~0x00000040); storage_ = null; if (storageBuilder_ != null) { storageBuilder_.dispose(); storageBuilder_ = null; } onChanged(); return this; } /** *
       * <pre>
       * supersedes StorageUuid
       * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder getStorageBuilder() { bitField0_ |= 0x00000040; onChanged(); return getStorageFieldBuilder().getBuilder(); } /** *
       * <pre>
       * supersedes StorageUuid
       * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() { if (storageBuilder_ != null) { return storageBuilder_.getMessageOrBuilder(); } else { return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_; } } /** *
       * <pre>
       * supersedes StorageUuid
       * </pre>
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> getStorageFieldBuilder() { if (storageBuilder_ == null) { storageBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>( getStorage(), getParentForChildren(), isClean()); storage_ = null; } return storageBuilder_; } private long nonDfsUsed_ ; /** * optional uint64 nonDfsUsed = 8; * @return Whether the nonDfsUsed field is set. */ @java.lang.Override public boolean hasNonDfsUsed() { return ((bitField0_ & 0x00000080) != 0); } /** * optional uint64 nonDfsUsed = 8; * @return The nonDfsUsed. */ @java.lang.Override public long getNonDfsUsed() { return nonDfsUsed_; } /** * optional uint64 nonDfsUsed = 8; * @param value The nonDfsUsed to set. * @return This builder for chaining. */ public Builder setNonDfsUsed(long value) { nonDfsUsed_ = value; bitField0_ |= 0x00000080; onChanged(); return this; } /** * optional uint64 nonDfsUsed = 8; * @return This builder for chaining. */ public Builder clearNonDfsUsed() { bitField0_ = (bitField0_ & ~0x00000080); nonDfsUsed_ = 0L; onChanged(); return this; } private java.lang.Object mount_ = ""; /** * optional string mount = 9; * @return Whether the mount field is set. */ public boolean hasMount() { return ((bitField0_ & 0x00000100) != 0); } /** * optional string mount = 9; * @return The mount. */ public java.lang.String getMount() { java.lang.Object ref = mount_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { mount_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string mount = 9; * @return The bytes for mount. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getMountBytes() { java.lang.Object ref = mount_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); mount_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string mount = 9; * @param value The mount to set. * @return This builder for chaining. */ public Builder setMount( java.lang.String value) { if (value == null) { throw new NullPointerException(); } mount_ = value; bitField0_ |= 0x00000100; onChanged(); return this; } /** * optional string mount = 9; * @return This builder for chaining. */ public Builder clearMount() { mount_ = getDefaultInstance().getMount(); bitField0_ = (bitField0_ & ~0x00000100); onChanged(); return this; } /** * optional string mount = 9; * @param value The bytes for mount to set. * @return This builder for chaining. 
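       * <p>Editorial sketch, not part of the generated Javadoc: taken together, the setters on this
       * Builder are normally chained; the local variables below are placeholders.</p>
       * <pre>{@code
       * HdfsProtos.StorageReportProto report = HdfsProtos.StorageReportProto.newBuilder()
       *     .setStorageUuid(storage.getStorageUuid())  // required, even though deprecated
       *     .setStorage(storage)                       // field 7, supersedes storageUuid
       *     .setCapacity(capacityBytes)
       *     .setDfsUsed(dfsUsedBytes)
       *     .setNonDfsUsed(nonDfsUsedBytes)
       *     .setRemaining(remainingBytes)
       *     .setBlockPoolUsed(blockPoolUsedBytes)
       *     .setMount("/data/1")                       // hypothetical mount point
       *     .build();
       * }</pre>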
*/ public Builder setMountBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } mount_ = value; bitField0_ |= 0x00000100; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageReportProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageReportProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public StorageReportProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ContentSummaryProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ContentSummaryProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint64 length = 1; * @return Whether the length field is set. */ boolean hasLength(); /** * required uint64 length = 1; * @return The length. */ long getLength(); /** * required uint64 fileCount = 2; * @return Whether the fileCount field is set. */ boolean hasFileCount(); /** * required uint64 fileCount = 2; * @return The fileCount. */ long getFileCount(); /** * required uint64 directoryCount = 3; * @return Whether the directoryCount field is set. */ boolean hasDirectoryCount(); /** * required uint64 directoryCount = 3; * @return The directoryCount. */ long getDirectoryCount(); /** * required uint64 quota = 4; * @return Whether the quota field is set. */ boolean hasQuota(); /** * required uint64 quota = 4; * @return The quota. */ long getQuota(); /** * required uint64 spaceConsumed = 5; * @return Whether the spaceConsumed field is set. 
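   * <p>Editorial note, not generated: as with the other accessors in this interface, callers
   * normally pair the {@code has*} check with the matching getter, e.g.
   * {@code summary.hasSpaceConsumed() ? summary.getSpaceConsumed() : 0L}.</p>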
*/ boolean hasSpaceConsumed(); /** * required uint64 spaceConsumed = 5; * @return The spaceConsumed. */ long getSpaceConsumed(); /** * required uint64 spaceQuota = 6; * @return Whether the spaceQuota field is set. */ boolean hasSpaceQuota(); /** * required uint64 spaceQuota = 6; * @return The spaceQuota. */ long getSpaceQuota(); /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; * @return Whether the typeQuotaInfos field is set. */ boolean hasTypeQuotaInfos(); /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; * @return The typeQuotaInfos. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos(); /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder(); /** * optional uint64 snapshotLength = 8; * @return Whether the snapshotLength field is set. */ boolean hasSnapshotLength(); /** * optional uint64 snapshotLength = 8; * @return The snapshotLength. */ long getSnapshotLength(); /** * optional uint64 snapshotFileCount = 9; * @return Whether the snapshotFileCount field is set. */ boolean hasSnapshotFileCount(); /** * optional uint64 snapshotFileCount = 9; * @return The snapshotFileCount. */ long getSnapshotFileCount(); /** * optional uint64 snapshotDirectoryCount = 10; * @return Whether the snapshotDirectoryCount field is set. */ boolean hasSnapshotDirectoryCount(); /** * optional uint64 snapshotDirectoryCount = 10; * @return The snapshotDirectoryCount. */ long getSnapshotDirectoryCount(); /** * optional uint64 snapshotSpaceConsumed = 11; * @return Whether the snapshotSpaceConsumed field is set. */ boolean hasSnapshotSpaceConsumed(); /** * optional uint64 snapshotSpaceConsumed = 11; * @return The snapshotSpaceConsumed. */ long getSnapshotSpaceConsumed(); /** * optional string erasureCodingPolicy = 12; * @return Whether the erasureCodingPolicy field is set. */ boolean hasErasureCodingPolicy(); /** * optional string erasureCodingPolicy = 12; * @return The erasureCodingPolicy. */ java.lang.String getErasureCodingPolicy(); /** * optional string erasureCodingPolicy = 12; * @return The bytes for erasureCodingPolicy. */ org.apache.hadoop.thirdparty.protobuf.ByteString getErasureCodingPolicyBytes(); } /** *
   **
   * Summary of a file or directory
   * 
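   *
   * A minimal round-trip sketch using only members visible in this generated
   * class; the numeric values are illustrative placeholders, and parseFrom
   * declares InvalidProtocolBufferException:
   *
   *   ContentSummaryProto summary = ContentSummaryProto.newBuilder()
   *       .setLength(4096L)          // required
   *       .setFileCount(3L)          // required
   *       .setDirectoryCount(1L)     // required
   *       .setQuota(100L)            // required
   *       .setSpaceConsumed(12288L)  // required
   *       .setSpaceQuota(1048576L)   // required
   *       .build();                  // throws if any required field is unset
   *   byte[] wire = summary.toByteArray();
   *   ContentSummaryProto parsed = ContentSummaryProto.parseFrom(wire);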
* * Protobuf type {@code hadoop.hdfs.ContentSummaryProto} */ public static final class ContentSummaryProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ContentSummaryProto) ContentSummaryProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ContentSummaryProto.newBuilder() to construct. private ContentSummaryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ContentSummaryProto() { erasureCodingPolicy_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new ContentSummaryProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class); } private int bitField0_; public static final int LENGTH_FIELD_NUMBER = 1; private long length_ = 0L; /** * required uint64 length = 1; * @return Whether the length field is set. */ @java.lang.Override public boolean hasLength() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 length = 1; * @return The length. */ @java.lang.Override public long getLength() { return length_; } public static final int FILECOUNT_FIELD_NUMBER = 2; private long fileCount_ = 0L; /** * required uint64 fileCount = 2; * @return Whether the fileCount field is set. */ @java.lang.Override public boolean hasFileCount() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 fileCount = 2; * @return The fileCount. */ @java.lang.Override public long getFileCount() { return fileCount_; } public static final int DIRECTORYCOUNT_FIELD_NUMBER = 3; private long directoryCount_ = 0L; /** * required uint64 directoryCount = 3; * @return Whether the directoryCount field is set. */ @java.lang.Override public boolean hasDirectoryCount() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 directoryCount = 3; * @return The directoryCount. */ @java.lang.Override public long getDirectoryCount() { return directoryCount_; } public static final int QUOTA_FIELD_NUMBER = 4; private long quota_ = 0L; /** * required uint64 quota = 4; * @return Whether the quota field is set. */ @java.lang.Override public boolean hasQuota() { return ((bitField0_ & 0x00000008) != 0); } /** * required uint64 quota = 4; * @return The quota. */ @java.lang.Override public long getQuota() { return quota_; } public static final int SPACECONSUMED_FIELD_NUMBER = 5; private long spaceConsumed_ = 0L; /** * required uint64 spaceConsumed = 5; * @return Whether the spaceConsumed field is set. */ @java.lang.Override public boolean hasSpaceConsumed() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint64 spaceConsumed = 5; * @return The spaceConsumed. 
*/ @java.lang.Override public long getSpaceConsumed() { return spaceConsumed_; } public static final int SPACEQUOTA_FIELD_NUMBER = 6; private long spaceQuota_ = 0L; /** * required uint64 spaceQuota = 6; * @return Whether the spaceQuota field is set. */ @java.lang.Override public boolean hasSpaceQuota() { return ((bitField0_ & 0x00000020) != 0); } /** * required uint64 spaceQuota = 6; * @return The spaceQuota. */ @java.lang.Override public long getSpaceQuota() { return spaceQuota_; } public static final int TYPEQUOTAINFOS_FIELD_NUMBER = 7; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_; /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; * @return Whether the typeQuotaInfos field is set. */ @java.lang.Override public boolean hasTypeQuotaInfos() { return ((bitField0_ & 0x00000040) != 0); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; * @return The typeQuotaInfos. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() { return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() { return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } public static final int SNAPSHOTLENGTH_FIELD_NUMBER = 8; private long snapshotLength_ = 0L; /** * optional uint64 snapshotLength = 8; * @return Whether the snapshotLength field is set. */ @java.lang.Override public boolean hasSnapshotLength() { return ((bitField0_ & 0x00000080) != 0); } /** * optional uint64 snapshotLength = 8; * @return The snapshotLength. */ @java.lang.Override public long getSnapshotLength() { return snapshotLength_; } public static final int SNAPSHOTFILECOUNT_FIELD_NUMBER = 9; private long snapshotFileCount_ = 0L; /** * optional uint64 snapshotFileCount = 9; * @return Whether the snapshotFileCount field is set. */ @java.lang.Override public boolean hasSnapshotFileCount() { return ((bitField0_ & 0x00000100) != 0); } /** * optional uint64 snapshotFileCount = 9; * @return The snapshotFileCount. */ @java.lang.Override public long getSnapshotFileCount() { return snapshotFileCount_; } public static final int SNAPSHOTDIRECTORYCOUNT_FIELD_NUMBER = 10; private long snapshotDirectoryCount_ = 0L; /** * optional uint64 snapshotDirectoryCount = 10; * @return Whether the snapshotDirectoryCount field is set. */ @java.lang.Override public boolean hasSnapshotDirectoryCount() { return ((bitField0_ & 0x00000200) != 0); } /** * optional uint64 snapshotDirectoryCount = 10; * @return The snapshotDirectoryCount. */ @java.lang.Override public long getSnapshotDirectoryCount() { return snapshotDirectoryCount_; } public static final int SNAPSHOTSPACECONSUMED_FIELD_NUMBER = 11; private long snapshotSpaceConsumed_ = 0L; /** * optional uint64 snapshotSpaceConsumed = 11; * @return Whether the snapshotSpaceConsumed field is set. */ @java.lang.Override public boolean hasSnapshotSpaceConsumed() { return ((bitField0_ & 0x00000400) != 0); } /** * optional uint64 snapshotSpaceConsumed = 11; * @return The snapshotSpaceConsumed. 
*/ @java.lang.Override public long getSnapshotSpaceConsumed() { return snapshotSpaceConsumed_; } public static final int ERASURECODINGPOLICY_FIELD_NUMBER = 12; @SuppressWarnings("serial") private volatile java.lang.Object erasureCodingPolicy_ = ""; /** * optional string erasureCodingPolicy = 12; * @return Whether the erasureCodingPolicy field is set. */ @java.lang.Override public boolean hasErasureCodingPolicy() { return ((bitField0_ & 0x00000800) != 0); } /** * optional string erasureCodingPolicy = 12; * @return The erasureCodingPolicy. */ @java.lang.Override public java.lang.String getErasureCodingPolicy() { java.lang.Object ref = erasureCodingPolicy_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { erasureCodingPolicy_ = s; } return s; } } /** * optional string erasureCodingPolicy = 12; * @return The bytes for erasureCodingPolicy. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getErasureCodingPolicyBytes() { java.lang.Object ref = erasureCodingPolicy_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); erasureCodingPolicy_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasLength()) { memoizedIsInitialized = 0; return false; } if (!hasFileCount()) { memoizedIsInitialized = 0; return false; } if (!hasDirectoryCount()) { memoizedIsInitialized = 0; return false; } if (!hasQuota()) { memoizedIsInitialized = 0; return false; } if (!hasSpaceConsumed()) { memoizedIsInitialized = 0; return false; } if (!hasSpaceQuota()) { memoizedIsInitialized = 0; return false; } if (hasTypeQuotaInfos()) { if (!getTypeQuotaInfos().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, length_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, fileCount_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, directoryCount_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, quota_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, spaceConsumed_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt64(6, spaceQuota_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeMessage(7, getTypeQuotaInfos()); } if (((bitField0_ & 0x00000080) != 0)) { output.writeUInt64(8, snapshotLength_); } if (((bitField0_ & 0x00000100) != 0)) { output.writeUInt64(9, snapshotFileCount_); } if (((bitField0_ & 0x00000200) != 0)) { output.writeUInt64(10, snapshotDirectoryCount_); } if (((bitField0_ & 0x00000400) != 0)) { output.writeUInt64(11, snapshotSpaceConsumed_); } if (((bitField0_ & 0x00000800) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 12, erasureCodingPolicy_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size 
= memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, length_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, fileCount_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, directoryCount_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, quota_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, spaceConsumed_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(6, spaceQuota_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(7, getTypeQuotaInfos()); } if (((bitField0_ & 0x00000080) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(8, snapshotLength_); } if (((bitField0_ & 0x00000100) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(9, snapshotFileCount_); } if (((bitField0_ & 0x00000200) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(10, snapshotDirectoryCount_); } if (((bitField0_ & 0x00000400) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(11, snapshotSpaceConsumed_); } if (((bitField0_ & 0x00000800) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(12, erasureCodingPolicy_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) obj; if (hasLength() != other.hasLength()) return false; if (hasLength()) { if (getLength() != other.getLength()) return false; } if (hasFileCount() != other.hasFileCount()) return false; if (hasFileCount()) { if (getFileCount() != other.getFileCount()) return false; } if (hasDirectoryCount() != other.hasDirectoryCount()) return false; if (hasDirectoryCount()) { if (getDirectoryCount() != other.getDirectoryCount()) return false; } if (hasQuota() != other.hasQuota()) return false; if (hasQuota()) { if (getQuota() != other.getQuota()) return false; } if (hasSpaceConsumed() != other.hasSpaceConsumed()) return false; if (hasSpaceConsumed()) { if (getSpaceConsumed() != other.getSpaceConsumed()) return false; } if (hasSpaceQuota() != other.hasSpaceQuota()) return false; if (hasSpaceQuota()) { if (getSpaceQuota() != other.getSpaceQuota()) return false; } if (hasTypeQuotaInfos() != other.hasTypeQuotaInfos()) return false; if (hasTypeQuotaInfos()) { if (!getTypeQuotaInfos() .equals(other.getTypeQuotaInfos())) return false; } if (hasSnapshotLength() != other.hasSnapshotLength()) return false; if (hasSnapshotLength()) { if (getSnapshotLength() != other.getSnapshotLength()) return false; } if (hasSnapshotFileCount() != other.hasSnapshotFileCount()) return false; if 
(hasSnapshotFileCount()) { if (getSnapshotFileCount() != other.getSnapshotFileCount()) return false; } if (hasSnapshotDirectoryCount() != other.hasSnapshotDirectoryCount()) return false; if (hasSnapshotDirectoryCount()) { if (getSnapshotDirectoryCount() != other.getSnapshotDirectoryCount()) return false; } if (hasSnapshotSpaceConsumed() != other.hasSnapshotSpaceConsumed()) return false; if (hasSnapshotSpaceConsumed()) { if (getSnapshotSpaceConsumed() != other.getSnapshotSpaceConsumed()) return false; } if (hasErasureCodingPolicy() != other.hasErasureCodingPolicy()) return false; if (hasErasureCodingPolicy()) { if (!getErasureCodingPolicy() .equals(other.getErasureCodingPolicy())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasLength()) { hash = (37 * hash) + LENGTH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLength()); } if (hasFileCount()) { hash = (37 * hash) + FILECOUNT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFileCount()); } if (hasDirectoryCount()) { hash = (37 * hash) + DIRECTORYCOUNT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getDirectoryCount()); } if (hasQuota()) { hash = (37 * hash) + QUOTA_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getQuota()); } if (hasSpaceConsumed()) { hash = (37 * hash) + SPACECONSUMED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSpaceConsumed()); } if (hasSpaceQuota()) { hash = (37 * hash) + SPACEQUOTA_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSpaceQuota()); } if (hasTypeQuotaInfos()) { hash = (37 * hash) + TYPEQUOTAINFOS_FIELD_NUMBER; hash = (53 * hash) + getTypeQuotaInfos().hashCode(); } if (hasSnapshotLength()) { hash = (37 * hash) + SNAPSHOTLENGTH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSnapshotLength()); } if (hasSnapshotFileCount()) { hash = (37 * hash) + SNAPSHOTFILECOUNT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSnapshotFileCount()); } if (hasSnapshotDirectoryCount()) { hash = (37 * hash) + SNAPSHOTDIRECTORYCOUNT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSnapshotDirectoryCount()); } if (hasSnapshotSpaceConsumed()) { hash = (37 * hash) + SNAPSHOTSPACECONSUMED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSnapshotSpaceConsumed()); } if (hasErasureCodingPolicy()) { hash = (37 * hash) + ERASURECODINGPOLICY_FIELD_NUMBER; hash = (53 * hash) + getErasureCodingPolicy().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public 
Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Summary of a file or directory
     * 
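     *
     * A hedged note on required fields, reflecting the checks visible in build()
     * and isInitialized() below: build() fails via newUninitializedMessageException
     * when any of the six required fields (length, fileCount, directoryCount,
     * quota, spaceConsumed, spaceQuota) is unset, while buildPartial() skips the
     * check. For example:
     *
     *   ContentSummaryProto.Builder b = ContentSummaryProto.newBuilder().setLength(0L);
     *   boolean ready = b.isInitialized();              // false: five required fields unset
     *   ContentSummaryProto partial = b.buildPartial(); // allowed despite missing fields
     *   // b.build() would throw here until the remaining required fields are set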
* * Protobuf type {@code hadoop.hdfs.ContentSummaryProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ContentSummaryProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getTypeQuotaInfosFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; length_ = 0L; fileCount_ = 0L; directoryCount_ = 0L; quota_ = 0L; spaceConsumed_ = 0L; spaceQuota_ = 0L; typeQuotaInfos_ = null; if (typeQuotaInfosBuilder_ != null) { typeQuotaInfosBuilder_.dispose(); typeQuotaInfosBuilder_ = null; } snapshotLength_ = 0L; snapshotFileCount_ = 0L; snapshotDirectoryCount_ = 0L; snapshotSpaceConsumed_ = 0L; erasureCodingPolicy_ = ""; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.length_ = length_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.fileCount_ = fileCount_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.directoryCount_ = directoryCount_; to_bitField0_ |= 0x00000004; } if 
(((from_bitField0_ & 0x00000008) != 0)) { result.quota_ = quota_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.spaceConsumed_ = spaceConsumed_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.spaceQuota_ = spaceQuota_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { result.typeQuotaInfos_ = typeQuotaInfosBuilder_ == null ? typeQuotaInfos_ : typeQuotaInfosBuilder_.build(); to_bitField0_ |= 0x00000040; } if (((from_bitField0_ & 0x00000080) != 0)) { result.snapshotLength_ = snapshotLength_; to_bitField0_ |= 0x00000080; } if (((from_bitField0_ & 0x00000100) != 0)) { result.snapshotFileCount_ = snapshotFileCount_; to_bitField0_ |= 0x00000100; } if (((from_bitField0_ & 0x00000200) != 0)) { result.snapshotDirectoryCount_ = snapshotDirectoryCount_; to_bitField0_ |= 0x00000200; } if (((from_bitField0_ & 0x00000400) != 0)) { result.snapshotSpaceConsumed_ = snapshotSpaceConsumed_; to_bitField0_ |= 0x00000400; } if (((from_bitField0_ & 0x00000800) != 0)) { result.erasureCodingPolicy_ = erasureCodingPolicy_; to_bitField0_ |= 0x00000800; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance()) return this; if (other.hasLength()) { setLength(other.getLength()); } if (other.hasFileCount()) { setFileCount(other.getFileCount()); } if (other.hasDirectoryCount()) { setDirectoryCount(other.getDirectoryCount()); } if (other.hasQuota()) { setQuota(other.getQuota()); } if (other.hasSpaceConsumed()) { setSpaceConsumed(other.getSpaceConsumed()); } if (other.hasSpaceQuota()) { setSpaceQuota(other.getSpaceQuota()); } if (other.hasTypeQuotaInfos()) { mergeTypeQuotaInfos(other.getTypeQuotaInfos()); } if (other.hasSnapshotLength()) { setSnapshotLength(other.getSnapshotLength()); } if (other.hasSnapshotFileCount()) { setSnapshotFileCount(other.getSnapshotFileCount()); } if (other.hasSnapshotDirectoryCount()) { setSnapshotDirectoryCount(other.getSnapshotDirectoryCount()); } if (other.hasSnapshotSpaceConsumed()) { 
setSnapshotSpaceConsumed(other.getSnapshotSpaceConsumed()); } if (other.hasErasureCodingPolicy()) { erasureCodingPolicy_ = other.erasureCodingPolicy_; bitField0_ |= 0x00000800; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasLength()) { return false; } if (!hasFileCount()) { return false; } if (!hasDirectoryCount()) { return false; } if (!hasQuota()) { return false; } if (!hasSpaceConsumed()) { return false; } if (!hasSpaceQuota()) { return false; } if (hasTypeQuotaInfos()) { if (!getTypeQuotaInfos().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { length_ = input.readUInt64(); bitField0_ |= 0x00000001; break; } // case 8 case 16: { fileCount_ = input.readUInt64(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { directoryCount_ = input.readUInt64(); bitField0_ |= 0x00000004; break; } // case 24 case 32: { quota_ = input.readUInt64(); bitField0_ |= 0x00000008; break; } // case 32 case 40: { spaceConsumed_ = input.readUInt64(); bitField0_ |= 0x00000010; break; } // case 40 case 48: { spaceQuota_ = input.readUInt64(); bitField0_ |= 0x00000020; break; } // case 48 case 58: { input.readMessage( getTypeQuotaInfosFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000040; break; } // case 58 case 64: { snapshotLength_ = input.readUInt64(); bitField0_ |= 0x00000080; break; } // case 64 case 72: { snapshotFileCount_ = input.readUInt64(); bitField0_ |= 0x00000100; break; } // case 72 case 80: { snapshotDirectoryCount_ = input.readUInt64(); bitField0_ |= 0x00000200; break; } // case 80 case 88: { snapshotSpaceConsumed_ = input.readUInt64(); bitField0_ |= 0x00000400; break; } // case 88 case 98: { erasureCodingPolicy_ = input.readBytes(); bitField0_ |= 0x00000800; break; } // case 98 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private long length_ ; /** * required uint64 length = 1; * @return Whether the length field is set. */ @java.lang.Override public boolean hasLength() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 length = 1; * @return The length. */ @java.lang.Override public long getLength() { return length_; } /** * required uint64 length = 1; * @param value The length to set. * @return This builder for chaining. */ public Builder setLength(long value) { length_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required uint64 length = 1; * @return This builder for chaining. */ public Builder clearLength() { bitField0_ = (bitField0_ & ~0x00000001); length_ = 0L; onChanged(); return this; } private long fileCount_ ; /** * required uint64 fileCount = 2; * @return Whether the fileCount field is set. 
*/ @java.lang.Override public boolean hasFileCount() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 fileCount = 2; * @return The fileCount. */ @java.lang.Override public long getFileCount() { return fileCount_; } /** * required uint64 fileCount = 2; * @param value The fileCount to set. * @return This builder for chaining. */ public Builder setFileCount(long value) { fileCount_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required uint64 fileCount = 2; * @return This builder for chaining. */ public Builder clearFileCount() { bitField0_ = (bitField0_ & ~0x00000002); fileCount_ = 0L; onChanged(); return this; } private long directoryCount_ ; /** * required uint64 directoryCount = 3; * @return Whether the directoryCount field is set. */ @java.lang.Override public boolean hasDirectoryCount() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 directoryCount = 3; * @return The directoryCount. */ @java.lang.Override public long getDirectoryCount() { return directoryCount_; } /** * required uint64 directoryCount = 3; * @param value The directoryCount to set. * @return This builder for chaining. */ public Builder setDirectoryCount(long value) { directoryCount_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required uint64 directoryCount = 3; * @return This builder for chaining. */ public Builder clearDirectoryCount() { bitField0_ = (bitField0_ & ~0x00000004); directoryCount_ = 0L; onChanged(); return this; } private long quota_ ; /** * required uint64 quota = 4; * @return Whether the quota field is set. */ @java.lang.Override public boolean hasQuota() { return ((bitField0_ & 0x00000008) != 0); } /** * required uint64 quota = 4; * @return The quota. */ @java.lang.Override public long getQuota() { return quota_; } /** * required uint64 quota = 4; * @param value The quota to set. * @return This builder for chaining. */ public Builder setQuota(long value) { quota_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * required uint64 quota = 4; * @return This builder for chaining. */ public Builder clearQuota() { bitField0_ = (bitField0_ & ~0x00000008); quota_ = 0L; onChanged(); return this; } private long spaceConsumed_ ; /** * required uint64 spaceConsumed = 5; * @return Whether the spaceConsumed field is set. */ @java.lang.Override public boolean hasSpaceConsumed() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint64 spaceConsumed = 5; * @return The spaceConsumed. */ @java.lang.Override public long getSpaceConsumed() { return spaceConsumed_; } /** * required uint64 spaceConsumed = 5; * @param value The spaceConsumed to set. * @return This builder for chaining. */ public Builder setSpaceConsumed(long value) { spaceConsumed_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * required uint64 spaceConsumed = 5; * @return This builder for chaining. */ public Builder clearSpaceConsumed() { bitField0_ = (bitField0_ & ~0x00000010); spaceConsumed_ = 0L; onChanged(); return this; } private long spaceQuota_ ; /** * required uint64 spaceQuota = 6; * @return Whether the spaceQuota field is set. */ @java.lang.Override public boolean hasSpaceQuota() { return ((bitField0_ & 0x00000020) != 0); } /** * required uint64 spaceQuota = 6; * @return The spaceQuota. */ @java.lang.Override public long getSpaceQuota() { return spaceQuota_; } /** * required uint64 spaceQuota = 6; * @param value The spaceQuota to set. * @return This builder for chaining. 
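       *
       * In HDFS terms this is typically the raw disk-space quota in bytes (the
       * value managed by "hdfs dfsadmin -setSpaceQuota"), distinct from the
       * name-count quota in field 4; treat that mapping as an interpretation,
       * not something stated by the generated code. Illustrative chained call
       * with placeholder values:
       *
       *   builder.setQuota(100000L).setSpaceQuota(10L * 1024 * 1024 * 1024);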
*/ public Builder setSpaceQuota(long value) { spaceQuota_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } /** * required uint64 spaceQuota = 6; * @return This builder for chaining. */ public Builder clearSpaceQuota() { bitField0_ = (bitField0_ & ~0x00000020); spaceQuota_ = 0L; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> typeQuotaInfosBuilder_; /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; * @return Whether the typeQuotaInfos field is set. */ public boolean hasTypeQuotaInfos() { return ((bitField0_ & 0x00000040) != 0); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; * @return The typeQuotaInfos. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() { if (typeQuotaInfosBuilder_ == null) { return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } else { return typeQuotaInfosBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public Builder setTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) { if (typeQuotaInfosBuilder_ == null) { if (value == null) { throw new NullPointerException(); } typeQuotaInfos_ = value; } else { typeQuotaInfosBuilder_.setMessage(value); } bitField0_ |= 0x00000040; onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public Builder setTypeQuotaInfos( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder builderForValue) { if (typeQuotaInfosBuilder_ == null) { typeQuotaInfos_ = builderForValue.build(); } else { typeQuotaInfosBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000040; onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public Builder mergeTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) { if (typeQuotaInfosBuilder_ == null) { if (((bitField0_ & 0x00000040) != 0) && typeQuotaInfos_ != null && typeQuotaInfos_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) { getTypeQuotaInfosBuilder().mergeFrom(value); } else { typeQuotaInfos_ = value; } } else { typeQuotaInfosBuilder_.mergeFrom(value); } bitField0_ |= 0x00000040; onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public Builder clearTypeQuotaInfos() { bitField0_ = (bitField0_ & ~0x00000040); typeQuotaInfos_ = null; if (typeQuotaInfosBuilder_ != null) { typeQuotaInfosBuilder_.dispose(); typeQuotaInfosBuilder_ = null; } onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder getTypeQuotaInfosBuilder() { bitField0_ |= 0x00000040; onChanged(); return getTypeQuotaInfosFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto 
typeQuotaInfos = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() { if (typeQuotaInfosBuilder_ != null) { return typeQuotaInfosBuilder_.getMessageOrBuilder(); } else { return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> getTypeQuotaInfosFieldBuilder() { if (typeQuotaInfosBuilder_ == null) { typeQuotaInfosBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder>( getTypeQuotaInfos(), getParentForChildren(), isClean()); typeQuotaInfos_ = null; } return typeQuotaInfosBuilder_; } private long snapshotLength_ ; /** * optional uint64 snapshotLength = 8; * @return Whether the snapshotLength field is set. */ @java.lang.Override public boolean hasSnapshotLength() { return ((bitField0_ & 0x00000080) != 0); } /** * optional uint64 snapshotLength = 8; * @return The snapshotLength. */ @java.lang.Override public long getSnapshotLength() { return snapshotLength_; } /** * optional uint64 snapshotLength = 8; * @param value The snapshotLength to set. * @return This builder for chaining. */ public Builder setSnapshotLength(long value) { snapshotLength_ = value; bitField0_ |= 0x00000080; onChanged(); return this; } /** * optional uint64 snapshotLength = 8; * @return This builder for chaining. */ public Builder clearSnapshotLength() { bitField0_ = (bitField0_ & ~0x00000080); snapshotLength_ = 0L; onChanged(); return this; } private long snapshotFileCount_ ; /** * optional uint64 snapshotFileCount = 9; * @return Whether the snapshotFileCount field is set. */ @java.lang.Override public boolean hasSnapshotFileCount() { return ((bitField0_ & 0x00000100) != 0); } /** * optional uint64 snapshotFileCount = 9; * @return The snapshotFileCount. */ @java.lang.Override public long getSnapshotFileCount() { return snapshotFileCount_; } /** * optional uint64 snapshotFileCount = 9; * @param value The snapshotFileCount to set. * @return This builder for chaining. */ public Builder setSnapshotFileCount(long value) { snapshotFileCount_ = value; bitField0_ |= 0x00000100; onChanged(); return this; } /** * optional uint64 snapshotFileCount = 9; * @return This builder for chaining. */ public Builder clearSnapshotFileCount() { bitField0_ = (bitField0_ & ~0x00000100); snapshotFileCount_ = 0L; onChanged(); return this; } private long snapshotDirectoryCount_ ; /** * optional uint64 snapshotDirectoryCount = 10; * @return Whether the snapshotDirectoryCount field is set. */ @java.lang.Override public boolean hasSnapshotDirectoryCount() { return ((bitField0_ & 0x00000200) != 0); } /** * optional uint64 snapshotDirectoryCount = 10; * @return The snapshotDirectoryCount. 
*/ @java.lang.Override public long getSnapshotDirectoryCount() { return snapshotDirectoryCount_; } /** * optional uint64 snapshotDirectoryCount = 10; * @param value The snapshotDirectoryCount to set. * @return This builder for chaining. */ public Builder setSnapshotDirectoryCount(long value) { snapshotDirectoryCount_ = value; bitField0_ |= 0x00000200; onChanged(); return this; } /** * optional uint64 snapshotDirectoryCount = 10; * @return This builder for chaining. */ public Builder clearSnapshotDirectoryCount() { bitField0_ = (bitField0_ & ~0x00000200); snapshotDirectoryCount_ = 0L; onChanged(); return this; } private long snapshotSpaceConsumed_ ; /** * optional uint64 snapshotSpaceConsumed = 11; * @return Whether the snapshotSpaceConsumed field is set. */ @java.lang.Override public boolean hasSnapshotSpaceConsumed() { return ((bitField0_ & 0x00000400) != 0); } /** * optional uint64 snapshotSpaceConsumed = 11; * @return The snapshotSpaceConsumed. */ @java.lang.Override public long getSnapshotSpaceConsumed() { return snapshotSpaceConsumed_; } /** * optional uint64 snapshotSpaceConsumed = 11; * @param value The snapshotSpaceConsumed to set. * @return This builder for chaining. */ public Builder setSnapshotSpaceConsumed(long value) { snapshotSpaceConsumed_ = value; bitField0_ |= 0x00000400; onChanged(); return this; } /** * optional uint64 snapshotSpaceConsumed = 11; * @return This builder for chaining. */ public Builder clearSnapshotSpaceConsumed() { bitField0_ = (bitField0_ & ~0x00000400); snapshotSpaceConsumed_ = 0L; onChanged(); return this; } private java.lang.Object erasureCodingPolicy_ = ""; /** * optional string erasureCodingPolicy = 12; * @return Whether the erasureCodingPolicy field is set. */ public boolean hasErasureCodingPolicy() { return ((bitField0_ & 0x00000800) != 0); } /** * optional string erasureCodingPolicy = 12; * @return The erasureCodingPolicy. */ public java.lang.String getErasureCodingPolicy() { java.lang.Object ref = erasureCodingPolicy_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { erasureCodingPolicy_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string erasureCodingPolicy = 12; * @return The bytes for erasureCodingPolicy. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getErasureCodingPolicyBytes() { java.lang.Object ref = erasureCodingPolicy_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); erasureCodingPolicy_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string erasureCodingPolicy = 12; * @param value The erasureCodingPolicy to set. * @return This builder for chaining. */ public Builder setErasureCodingPolicy( java.lang.String value) { if (value == null) { throw new NullPointerException(); } erasureCodingPolicy_ = value; bitField0_ |= 0x00000800; onChanged(); return this; } /** * optional string erasureCodingPolicy = 12; * @return This builder for chaining. */ public Builder clearErasureCodingPolicy() { erasureCodingPolicy_ = getDefaultInstance().getErasureCodingPolicy(); bitField0_ = (bitField0_ & ~0x00000800); onChanged(); return this; } /** * optional string erasureCodingPolicy = 12; * @param value The bytes for erasureCodingPolicy to set. 
* @return This builder for chaining. */ public Builder setErasureCodingPolicyBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } erasureCodingPolicy_ = value; bitField0_ |= 0x00000800; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ContentSummaryProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ContentSummaryProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ContentSummaryProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface QuotaUsageProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.QuotaUsageProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint64 fileAndDirectoryCount = 1; * @return Whether the fileAndDirectoryCount field is set. */ boolean hasFileAndDirectoryCount(); /** * required uint64 fileAndDirectoryCount = 1; * @return The fileAndDirectoryCount. */ long getFileAndDirectoryCount(); /** * required uint64 quota = 2; * @return Whether the quota field is set. */ boolean hasQuota(); /** * required uint64 quota = 2; * @return The quota. */ long getQuota(); /** * required uint64 spaceConsumed = 3; * @return Whether the spaceConsumed field is set. */ boolean hasSpaceConsumed(); /** * required uint64 spaceConsumed = 3; * @return The spaceConsumed. */ long getSpaceConsumed(); /** * required uint64 spaceQuota = 4; * @return Whether the spaceQuota field is set. 
*/ boolean hasSpaceQuota(); /** * required uint64 spaceQuota = 4; * @return The spaceQuota. */ long getSpaceQuota(); /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; * @return Whether the typeQuotaInfos field is set. */ boolean hasTypeQuotaInfos(); /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; * @return The typeQuotaInfos. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos(); /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder(); } /** *
   **
   * Summary of quota usage of a directory
   * 
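   *
   * A minimal construction sketch; the values are placeholders, and the four
   * setters are assumed from the standard generated Builder pattern (their
   * getters and the required-field checks are visible in this class):
   *
   *   QuotaUsageProto usage = QuotaUsageProto.newBuilder()
   *       .setFileAndDirectoryCount(42L)
   *       .setQuota(1000L)
   *       .setSpaceConsumed(65536L)
   *       .setSpaceQuota(10485760L)
   *       .build();
   *
   * The optional typeQuotaInfos field (5) can additionally carry per-storage-type
   * quotas via the corresponding setTypeQuotaInfos builder method.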
* * Protobuf type {@code hadoop.hdfs.QuotaUsageProto} */ public static final class QuotaUsageProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.QuotaUsageProto) QuotaUsageProtoOrBuilder { private static final long serialVersionUID = 0L; // Use QuotaUsageProto.newBuilder() to construct. private QuotaUsageProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private QuotaUsageProto() { } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new QuotaUsageProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder.class); } private int bitField0_; public static final int FILEANDDIRECTORYCOUNT_FIELD_NUMBER = 1; private long fileAndDirectoryCount_ = 0L; /** * required uint64 fileAndDirectoryCount = 1; * @return Whether the fileAndDirectoryCount field is set. */ @java.lang.Override public boolean hasFileAndDirectoryCount() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 fileAndDirectoryCount = 1; * @return The fileAndDirectoryCount. */ @java.lang.Override public long getFileAndDirectoryCount() { return fileAndDirectoryCount_; } public static final int QUOTA_FIELD_NUMBER = 2; private long quota_ = 0L; /** * required uint64 quota = 2; * @return Whether the quota field is set. */ @java.lang.Override public boolean hasQuota() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 quota = 2; * @return The quota. */ @java.lang.Override public long getQuota() { return quota_; } public static final int SPACECONSUMED_FIELD_NUMBER = 3; private long spaceConsumed_ = 0L; /** * required uint64 spaceConsumed = 3; * @return Whether the spaceConsumed field is set. */ @java.lang.Override public boolean hasSpaceConsumed() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 spaceConsumed = 3; * @return The spaceConsumed. */ @java.lang.Override public long getSpaceConsumed() { return spaceConsumed_; } public static final int SPACEQUOTA_FIELD_NUMBER = 4; private long spaceQuota_ = 0L; /** * required uint64 spaceQuota = 4; * @return Whether the spaceQuota field is set. */ @java.lang.Override public boolean hasSpaceQuota() { return ((bitField0_ & 0x00000008) != 0); } /** * required uint64 spaceQuota = 4; * @return The spaceQuota. */ @java.lang.Override public long getSpaceQuota() { return spaceQuota_; } public static final int TYPEQUOTAINFOS_FIELD_NUMBER = 5; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_; /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; * @return Whether the typeQuotaInfos field is set. 
*/ @java.lang.Override public boolean hasTypeQuotaInfos() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; * @return The typeQuotaInfos. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() { return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() { return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasFileAndDirectoryCount()) { memoizedIsInitialized = 0; return false; } if (!hasQuota()) { memoizedIsInitialized = 0; return false; } if (!hasSpaceConsumed()) { memoizedIsInitialized = 0; return false; } if (!hasSpaceQuota()) { memoizedIsInitialized = 0; return false; } if (hasTypeQuotaInfos()) { if (!getTypeQuotaInfos().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, fileAndDirectoryCount_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, quota_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, spaceConsumed_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, spaceQuota_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeMessage(5, getTypeQuotaInfos()); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, fileAndDirectoryCount_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, quota_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, spaceConsumed_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, spaceQuota_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(5, getTypeQuotaInfos()); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto) obj; if (hasFileAndDirectoryCount() != other.hasFileAndDirectoryCount()) return false; if (hasFileAndDirectoryCount()) { if (getFileAndDirectoryCount() != 
other.getFileAndDirectoryCount()) return false; } if (hasQuota() != other.hasQuota()) return false; if (hasQuota()) { if (getQuota() != other.getQuota()) return false; } if (hasSpaceConsumed() != other.hasSpaceConsumed()) return false; if (hasSpaceConsumed()) { if (getSpaceConsumed() != other.getSpaceConsumed()) return false; } if (hasSpaceQuota() != other.hasSpaceQuota()) return false; if (hasSpaceQuota()) { if (getSpaceQuota() != other.getSpaceQuota()) return false; } if (hasTypeQuotaInfos() != other.hasTypeQuotaInfos()) return false; if (hasTypeQuotaInfos()) { if (!getTypeQuotaInfos() .equals(other.getTypeQuotaInfos())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasFileAndDirectoryCount()) { hash = (37 * hash) + FILEANDDIRECTORYCOUNT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFileAndDirectoryCount()); } if (hasQuota()) { hash = (37 * hash) + QUOTA_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getQuota()); } if (hasSpaceConsumed()) { hash = (37 * hash) + SPACECONSUMED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSpaceConsumed()); } if (hasSpaceQuota()) { hash = (37 * hash) + SPACEQUOTA_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSpaceQuota()); } if (hasTypeQuotaInfos()) { hash = (37 * hash) + TYPEQUOTAINFOS_FIELD_NUMBER; hash = (53 * hash) + getTypeQuotaInfos().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Summary of quota usage of a directory
      * </pre>
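      *
      * A hypothetical usage sketch, using only the generated API declared in this class;
      * the numeric values are illustrative. All four uint64 fields are required, so
      * build() throws an uninitialized-message exception if any of them is unset; the
      * optional typeQuotaInfos field can additionally be supplied via setTypeQuotaInfos.
      * <pre>{@code
      * HdfsProtos.QuotaUsageProto usage = HdfsProtos.QuotaUsageProto.newBuilder()
      *     .setFileAndDirectoryCount(42L)   // required
      *     .setQuota(1000L)                 // required
      *     .setSpaceConsumed(4096L)         // required
      *     .setSpaceQuota(8192L)            // required
      *     .build();                        // throws if a required field is missing
      * }</pre>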
* * Protobuf type {@code hadoop.hdfs.QuotaUsageProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.QuotaUsageProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getTypeQuotaInfosFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; fileAndDirectoryCount_ = 0L; quota_ = 0L; spaceConsumed_ = 0L; spaceQuota_ = 0L; typeQuotaInfos_ = null; if (typeQuotaInfosBuilder_ != null) { typeQuotaInfosBuilder_.dispose(); typeQuotaInfosBuilder_ = null; } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.fileAndDirectoryCount_ = fileAndDirectoryCount_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.quota_ = quota_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.spaceConsumed_ = spaceConsumed_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.spaceQuota_ = spaceQuota_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.typeQuotaInfos_ = typeQuotaInfosBuilder_ == null 
? typeQuotaInfos_ : typeQuotaInfosBuilder_.build(); to_bitField0_ |= 0x00000010; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.getDefaultInstance()) return this; if (other.hasFileAndDirectoryCount()) { setFileAndDirectoryCount(other.getFileAndDirectoryCount()); } if (other.hasQuota()) { setQuota(other.getQuota()); } if (other.hasSpaceConsumed()) { setSpaceConsumed(other.getSpaceConsumed()); } if (other.hasSpaceQuota()) { setSpaceQuota(other.getSpaceQuota()); } if (other.hasTypeQuotaInfos()) { mergeTypeQuotaInfos(other.getTypeQuotaInfos()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasFileAndDirectoryCount()) { return false; } if (!hasQuota()) { return false; } if (!hasSpaceConsumed()) { return false; } if (!hasSpaceQuota()) { return false; } if (hasTypeQuotaInfos()) { if (!getTypeQuotaInfos().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { fileAndDirectoryCount_ = input.readUInt64(); bitField0_ |= 0x00000001; break; } // case 8 case 16: { quota_ = input.readUInt64(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { spaceConsumed_ = input.readUInt64(); bitField0_ |= 0x00000004; break; } // case 24 case 32: { spaceQuota_ = input.readUInt64(); bitField0_ |= 0x00000008; break; } // case 32 case 42: { input.readMessage( getTypeQuotaInfosFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000010; break; } // case 42 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch 
(org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private long fileAndDirectoryCount_ ; /** * required uint64 fileAndDirectoryCount = 1; * @return Whether the fileAndDirectoryCount field is set. */ @java.lang.Override public boolean hasFileAndDirectoryCount() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 fileAndDirectoryCount = 1; * @return The fileAndDirectoryCount. */ @java.lang.Override public long getFileAndDirectoryCount() { return fileAndDirectoryCount_; } /** * required uint64 fileAndDirectoryCount = 1; * @param value The fileAndDirectoryCount to set. * @return This builder for chaining. */ public Builder setFileAndDirectoryCount(long value) { fileAndDirectoryCount_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required uint64 fileAndDirectoryCount = 1; * @return This builder for chaining. */ public Builder clearFileAndDirectoryCount() { bitField0_ = (bitField0_ & ~0x00000001); fileAndDirectoryCount_ = 0L; onChanged(); return this; } private long quota_ ; /** * required uint64 quota = 2; * @return Whether the quota field is set. */ @java.lang.Override public boolean hasQuota() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 quota = 2; * @return The quota. */ @java.lang.Override public long getQuota() { return quota_; } /** * required uint64 quota = 2; * @param value The quota to set. * @return This builder for chaining. */ public Builder setQuota(long value) { quota_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required uint64 quota = 2; * @return This builder for chaining. */ public Builder clearQuota() { bitField0_ = (bitField0_ & ~0x00000002); quota_ = 0L; onChanged(); return this; } private long spaceConsumed_ ; /** * required uint64 spaceConsumed = 3; * @return Whether the spaceConsumed field is set. */ @java.lang.Override public boolean hasSpaceConsumed() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 spaceConsumed = 3; * @return The spaceConsumed. */ @java.lang.Override public long getSpaceConsumed() { return spaceConsumed_; } /** * required uint64 spaceConsumed = 3; * @param value The spaceConsumed to set. * @return This builder for chaining. */ public Builder setSpaceConsumed(long value) { spaceConsumed_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required uint64 spaceConsumed = 3; * @return This builder for chaining. */ public Builder clearSpaceConsumed() { bitField0_ = (bitField0_ & ~0x00000004); spaceConsumed_ = 0L; onChanged(); return this; } private long spaceQuota_ ; /** * required uint64 spaceQuota = 4; * @return Whether the spaceQuota field is set. */ @java.lang.Override public boolean hasSpaceQuota() { return ((bitField0_ & 0x00000008) != 0); } /** * required uint64 spaceQuota = 4; * @return The spaceQuota. */ @java.lang.Override public long getSpaceQuota() { return spaceQuota_; } /** * required uint64 spaceQuota = 4; * @param value The spaceQuota to set. * @return This builder for chaining. */ public Builder setSpaceQuota(long value) { spaceQuota_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * required uint64 spaceQuota = 4; * @return This builder for chaining. 
*/ public Builder clearSpaceQuota() { bitField0_ = (bitField0_ & ~0x00000008); spaceQuota_ = 0L; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> typeQuotaInfosBuilder_; /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; * @return Whether the typeQuotaInfos field is set. */ public boolean hasTypeQuotaInfos() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; * @return The typeQuotaInfos. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() { if (typeQuotaInfosBuilder_ == null) { return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } else { return typeQuotaInfosBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public Builder setTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) { if (typeQuotaInfosBuilder_ == null) { if (value == null) { throw new NullPointerException(); } typeQuotaInfos_ = value; } else { typeQuotaInfosBuilder_.setMessage(value); } bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public Builder setTypeQuotaInfos( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder builderForValue) { if (typeQuotaInfosBuilder_ == null) { typeQuotaInfos_ = builderForValue.build(); } else { typeQuotaInfosBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public Builder mergeTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) { if (typeQuotaInfosBuilder_ == null) { if (((bitField0_ & 0x00000010) != 0) && typeQuotaInfos_ != null && typeQuotaInfos_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) { getTypeQuotaInfosBuilder().mergeFrom(value); } else { typeQuotaInfos_ = value; } } else { typeQuotaInfosBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public Builder clearTypeQuotaInfos() { bitField0_ = (bitField0_ & ~0x00000010); typeQuotaInfos_ = null; if (typeQuotaInfosBuilder_ != null) { typeQuotaInfosBuilder_.dispose(); typeQuotaInfosBuilder_ = null; } onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder getTypeQuotaInfosBuilder() { bitField0_ |= 0x00000010; onChanged(); return getTypeQuotaInfosFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() { if (typeQuotaInfosBuilder_ != null) { return 
typeQuotaInfosBuilder_.getMessageOrBuilder(); } else { return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> getTypeQuotaInfosFieldBuilder() { if (typeQuotaInfosBuilder_ == null) { typeQuotaInfosBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder>( getTypeQuotaInfos(), getParentForChildren(), isClean()); typeQuotaInfos_ = null; } return typeQuotaInfosBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.QuotaUsageProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.QuotaUsageProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public QuotaUsageProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface StorageTypeQuotaInfosProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageTypeQuotaInfosProto) 
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ java.util.List getTypeQuotaInfoList(); /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index); /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ int getTypeQuotaInfoCount(); /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ java.util.List getTypeQuotaInfoOrBuilderList(); /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder( int index); } /** *
   **
   * Storage type quota and usage information of a file or directory
    * </pre>
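    *
    * A hypothetical read-side sketch using the accessors generated for this message;
    * the default instance stands in for a value that would normally arrive in an RPC
    * response, and the loop body is illustrative only.
    * <pre>{@code
    * HdfsProtos.QuotaUsageProto usage = HdfsProtos.QuotaUsageProto.getDefaultInstance();
    * if (usage.hasTypeQuotaInfos()) {
    *   for (HdfsProtos.StorageTypeQuotaInfoProto info
    *       : usage.getTypeQuotaInfos().getTypeQuotaInfoList()) {
    *     long remaining = info.getQuota() - info.getConsumed();
    *   }
    * }
    * }</pre>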
* * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfosProto} */ public static final class StorageTypeQuotaInfosProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageTypeQuotaInfosProto) StorageTypeQuotaInfosProtoOrBuilder { private static final long serialVersionUID = 0L; // Use StorageTypeQuotaInfosProto.newBuilder() to construct. private StorageTypeQuotaInfosProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private StorageTypeQuotaInfosProto() { typeQuotaInfo_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new StorageTypeQuotaInfosProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder.class); } public static final int TYPEQUOTAINFO_FIELD_NUMBER = 1; @SuppressWarnings("serial") private java.util.List typeQuotaInfo_; /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ @java.lang.Override public java.util.List getTypeQuotaInfoList() { return typeQuotaInfo_; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ @java.lang.Override public java.util.List getTypeQuotaInfoOrBuilderList() { return typeQuotaInfo_; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ @java.lang.Override public int getTypeQuotaInfoCount() { return typeQuotaInfo_.size(); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index) { return typeQuotaInfo_.get(index); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder( int index) { return typeQuotaInfo_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; for (int i = 0; i < getTypeQuotaInfoCount(); i++) { if (!getTypeQuotaInfo(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < typeQuotaInfo_.size(); i++) { output.writeMessage(1, typeQuotaInfo_.get(i)); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if 
(size != -1) return size; size = 0; for (int i = 0; i < typeQuotaInfo_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, typeQuotaInfo_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) obj; if (!getTypeQuotaInfoList() .equals(other.getTypeQuotaInfoList())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getTypeQuotaInfoCount() > 0) { hash = (37 * hash) + TYPEQUOTAINFO_FIELD_NUMBER; hash = (53 * hash) + getTypeQuotaInfoList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Storage type quota and usage information of a file or directory
      * </pre>
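      *
      * A hypothetical build-side sketch, assuming the standard generated setters on the
      * nested StorageTypeQuotaInfoProto builder; the values are illustrative. In each
      * entry, quota and consumed are required, while type is optional and defaults to DISK.
      * <pre>{@code
      * HdfsProtos.StorageTypeQuotaInfosProto infos =
      *     HdfsProtos.StorageTypeQuotaInfosProto.newBuilder()
      *         .addTypeQuotaInfo(HdfsProtos.StorageTypeQuotaInfoProto.newBuilder()
      *             .setType(HdfsProtos.StorageTypeProto.SSD) // optional, defaults to DISK
      *             .setQuota(1024L)                          // required
      *             .setConsumed(512L))                       // required
      *         .build();
      * }</pre>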
* * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfosProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageTypeQuotaInfosProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; if (typeQuotaInfoBuilder_ == null) { typeQuotaInfo_ = java.util.Collections.emptyList(); } else { typeQuotaInfo_ = null; typeQuotaInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result) { if (typeQuotaInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { typeQuotaInfo_ = java.util.Collections.unmodifiableList(typeQuotaInfo_); bitField0_ = (bitField0_ & ~0x00000001); } result.typeQuotaInfo_ = typeQuotaInfo_; } else { result.typeQuotaInfo_ = typeQuotaInfoBuilder_.build(); } } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result) { int from_bitField0_ = bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, 
java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) return this; if (typeQuotaInfoBuilder_ == null) { if (!other.typeQuotaInfo_.isEmpty()) { if (typeQuotaInfo_.isEmpty()) { typeQuotaInfo_ = other.typeQuotaInfo_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.addAll(other.typeQuotaInfo_); } onChanged(); } } else { if (!other.typeQuotaInfo_.isEmpty()) { if (typeQuotaInfoBuilder_.isEmpty()) { typeQuotaInfoBuilder_.dispose(); typeQuotaInfoBuilder_ = null; typeQuotaInfo_ = other.typeQuotaInfo_; bitField0_ = (bitField0_ & ~0x00000001); typeQuotaInfoBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getTypeQuotaInfoFieldBuilder() : null; } else { typeQuotaInfoBuilder_.addAllMessages(other.typeQuotaInfo_); } } } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { for (int i = 0; i < getTypeQuotaInfoCount(); i++) { if (!getTypeQuotaInfo(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.PARSER, extensionRegistry); if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.add(m); } else { typeQuotaInfoBuilder_.addMessage(m); } break; } // case 10 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.util.List typeQuotaInfo_ = java.util.Collections.emptyList(); private void ensureTypeQuotaInfoIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { typeQuotaInfo_ = new java.util.ArrayList(typeQuotaInfo_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder> typeQuotaInfoBuilder_; /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public java.util.List getTypeQuotaInfoList() { if (typeQuotaInfoBuilder_ == null) { return java.util.Collections.unmodifiableList(typeQuotaInfo_); } else { return typeQuotaInfoBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public int getTypeQuotaInfoCount() { if (typeQuotaInfoBuilder_ == null) { return typeQuotaInfo_.size(); } else { return typeQuotaInfoBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index) { if (typeQuotaInfoBuilder_ == null) { return typeQuotaInfo_.get(index); } else { return typeQuotaInfoBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder setTypeQuotaInfo( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) { if (typeQuotaInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.set(index, value); onChanged(); } else { typeQuotaInfoBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder setTypeQuotaInfo( int index, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.set(index, builderForValue.build()); onChanged(); } else { typeQuotaInfoBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addTypeQuotaInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) { if (typeQuotaInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.add(value); onChanged(); } else { typeQuotaInfoBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addTypeQuotaInfo( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) { if (typeQuotaInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.add(index, value); onChanged(); } else { typeQuotaInfoBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addTypeQuotaInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.add(builderForValue.build()); onChanged(); } else { typeQuotaInfoBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addTypeQuotaInfo( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.add(index, builderForValue.build()); onChanged(); } else { typeQuotaInfoBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addAllTypeQuotaInfo( java.lang.Iterable values) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, typeQuotaInfo_); onChanged(); } else { typeQuotaInfoBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder clearTypeQuotaInfo() { if (typeQuotaInfoBuilder_ == null) { typeQuotaInfo_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { typeQuotaInfoBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder removeTypeQuotaInfo(int index) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.remove(index); onChanged(); } else { typeQuotaInfoBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder getTypeQuotaInfoBuilder( int index) { return getTypeQuotaInfoFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder( int index) { if 
(typeQuotaInfoBuilder_ == null) { return typeQuotaInfo_.get(index); } else { return typeQuotaInfoBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public java.util.List getTypeQuotaInfoOrBuilderList() { if (typeQuotaInfoBuilder_ != null) { return typeQuotaInfoBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(typeQuotaInfo_); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder addTypeQuotaInfoBuilder() { return getTypeQuotaInfoFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder addTypeQuotaInfoBuilder( int index) { return getTypeQuotaInfoFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public java.util.List getTypeQuotaInfoBuilderList() { return getTypeQuotaInfoFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder> getTypeQuotaInfoFieldBuilder() { if (typeQuotaInfoBuilder_ == null) { typeQuotaInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>( typeQuotaInfo_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); typeQuotaInfo_ = null; } return typeQuotaInfoBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypeQuotaInfosProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypeQuotaInfosProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public StorageTypeQuotaInfosProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { 
Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface StorageTypeQuotaInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageTypeQuotaInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; * @return Whether the type field is set. */ boolean hasType(); /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; * @return The type. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType(); /** * required uint64 quota = 2; * @return Whether the quota field is set. */ boolean hasQuota(); /** * required uint64 quota = 2; * @return The quota. */ long getQuota(); /** * required uint64 consumed = 3; * @return Whether the consumed field is set. */ boolean hasConsumed(); /** * required uint64 consumed = 3; * @return The consumed. */ long getConsumed(); } /** * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfoProto} */ public static final class StorageTypeQuotaInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageTypeQuotaInfoProto) StorageTypeQuotaInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use StorageTypeQuotaInfoProto.newBuilder() to construct. 
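  // A hypothetical round-trip sketch, assuming the standard toByteArray() inherited from
  // the protobuf message base class; the static parseFrom(byte[]) overload used here
  // appears further below. Parsing fails with InvalidProtocolBufferException when the
  // required quota/consumed fields are absent. Values are illustrative only.
  //
  //   HdfsProtos.StorageTypeQuotaInfoProto info =
  //       HdfsProtos.StorageTypeQuotaInfoProto.newBuilder()
  //           .setQuota(2048L)
  //           .setConsumed(100L)
  //           .build();
  //   HdfsProtos.StorageTypeQuotaInfoProto copy =
  //       HdfsProtos.StorageTypeQuotaInfoProto.parseFrom(info.toByteArray());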
private StorageTypeQuotaInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private StorageTypeQuotaInfoProto() { type_ = 1; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new StorageTypeQuotaInfoProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder.class); } private int bitField0_; public static final int TYPE_FIELD_NUMBER = 1; private int type_ = 1; /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; * @return Whether the type field is set. */ @java.lang.Override public boolean hasType() { return ((bitField0_ & 0x00000001) != 0); } /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; * @return The type. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(type_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } public static final int QUOTA_FIELD_NUMBER = 2; private long quota_ = 0L; /** * required uint64 quota = 2; * @return Whether the quota field is set. */ @java.lang.Override public boolean hasQuota() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 quota = 2; * @return The quota. */ @java.lang.Override public long getQuota() { return quota_; } public static final int CONSUMED_FIELD_NUMBER = 3; private long consumed_ = 0L; /** * required uint64 consumed = 3; * @return Whether the consumed field is set. */ @java.lang.Override public boolean hasConsumed() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 consumed = 3; * @return The consumed. 
*/ @java.lang.Override public long getConsumed() { return consumed_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasQuota()) { memoizedIsInitialized = 0; return false; } if (!hasConsumed()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, type_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, quota_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, consumed_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, type_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, quota_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, consumed_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) obj; if (hasType() != other.hasType()) return false; if (hasType()) { if (type_ != other.type_) return false; } if (hasQuota() != other.hasQuota()) return false; if (hasQuota()) { if (getQuota() != other.getQuota()) return false; } if (hasConsumed() != other.hasConsumed()) return false; if (hasConsumed()) { if (getConsumed() != other.getConsumed()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasType()) { hash = (37 * hash) + TYPE_FIELD_NUMBER; hash = (53 * hash) + type_; } if (hasQuota()) { hash = (37 * hash) + QUOTA_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getQuota()); } if (hasConsumed()) { hash = (37 * hash) + CONSUMED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getConsumed()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageTypeQuotaInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; type_ = 1; quota_ = 0L; consumed_ = 0L; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.type_ = type_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.quota_ = quota_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.consumed_ = consumed_; to_bitField0_ |= 0x00000004; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { 
return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance()) return this; if (other.hasType()) { setType(other.getType()); } if (other.hasQuota()) { setQuota(other.getQuota()); } if (other.hasConsumed()) { setConsumed(other.getConsumed()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasQuota()) { return false; } if (!hasConsumed()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(1, tmpRaw); } else { type_ = tmpRaw; bitField0_ |= 0x00000001; } break; } // case 8 case 16: { quota_ = input.readUInt64(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { consumed_ = input.readUInt64(); bitField0_ |= 0x00000004; break; } // case 24 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private int type_ = 1; /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; * @return Whether the type field is set. */ @java.lang.Override public boolean hasType() { return ((bitField0_ & 0x00000001) != 0); } /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; * @return The type. 
*/ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(type_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; * @param value The type to set. * @return This builder for chaining. */ public Builder setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; type_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; * @return This builder for chaining. */ public Builder clearType() { bitField0_ = (bitField0_ & ~0x00000001); type_ = 1; onChanged(); return this; } private long quota_ ; /** * required uint64 quota = 2; * @return Whether the quota field is set. */ @java.lang.Override public boolean hasQuota() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 quota = 2; * @return The quota. */ @java.lang.Override public long getQuota() { return quota_; } /** * required uint64 quota = 2; * @param value The quota to set. * @return This builder for chaining. */ public Builder setQuota(long value) { quota_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required uint64 quota = 2; * @return This builder for chaining. */ public Builder clearQuota() { bitField0_ = (bitField0_ & ~0x00000002); quota_ = 0L; onChanged(); return this; } private long consumed_ ; /** * required uint64 consumed = 3; * @return Whether the consumed field is set. */ @java.lang.Override public boolean hasConsumed() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 consumed = 3; * @return The consumed. */ @java.lang.Override public long getConsumed() { return consumed_; } /** * required uint64 consumed = 3; * @param value The consumed to set. * @return This builder for chaining. */ public Builder setConsumed(long value) { consumed_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required uint64 consumed = 3; * @return This builder for chaining. 
*/ public Builder clearConsumed() { bitField0_ = (bitField0_ & ~0x00000004); consumed_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypeQuotaInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypeQuotaInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public StorageTypeQuotaInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface CorruptFileBlocksProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.CorruptFileBlocksProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated string files = 1; * @return A list containing the files. */ java.util.List getFilesList(); /** * repeated string files = 1; * @return The count of files. */ int getFilesCount(); /** * repeated string files = 1; * @param index The index of the element to return. * @return The files at the given index. */ java.lang.String getFiles(int index); /** * repeated string files = 1; * @param index The index of the value to return. * @return The bytes of the files at the given index. */ org.apache.hadoop.thirdparty.protobuf.ByteString getFilesBytes(int index); /** * required string cookie = 2; * @return Whether the cookie field is set. */ boolean hasCookie(); /** * required string cookie = 2; * @return The cookie. */ java.lang.String getCookie(); /** * required string cookie = 2; * @return The bytes for cookie. */ org.apache.hadoop.thirdparty.protobuf.ByteString getCookieBytes(); } /** *
   **
   * Contains a list of paths corresponding to corrupt files and a cookie
   * used for iterative calls to NameNode.listCorruptFileBlocks.
   * 
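   * Illustrative usage (a hand-written sketch, not produced by protoc): how this
   * message is typically built and round-tripped, and how the cookie feeds the
   * next listCorruptFileBlocks call. The path and cookie literals are hypothetical;
   * the accessors are the generated ones defined in this class.
   *
   *   CorruptFileBlocksProto page = CorruptFileBlocksProto.newBuilder()
   *       .addFiles("/data/example/part-00000")       // hypothetical corrupt path
   *       .setCookie("opaque-cookie-from-namenode")   // hypothetical cookie value
   *       .build();                                   // build() fails if cookie is unset
   *   byte[] wire = page.toByteArray();
   *   // parseFrom may throw InvalidProtocolBufferException
   *   CorruptFileBlocksProto decoded = CorruptFileBlocksProto.parseFrom(wire);
   *   org.apache.hadoop.thirdparty.protobuf.ProtocolStringList files = decoded.getFilesList();
   *   java.lang.String nextCookie = decoded.getCookie();  // pass back on the next call
   *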
* * Protobuf type {@code hadoop.hdfs.CorruptFileBlocksProto} */ public static final class CorruptFileBlocksProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.CorruptFileBlocksProto) CorruptFileBlocksProtoOrBuilder { private static final long serialVersionUID = 0L; // Use CorruptFileBlocksProto.newBuilder() to construct. private CorruptFileBlocksProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private CorruptFileBlocksProto() { files_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; cookie_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new CorruptFileBlocksProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class); } private int bitField0_; public static final int FILES_FIELD_NUMBER = 1; @SuppressWarnings("serial") private org.apache.hadoop.thirdparty.protobuf.LazyStringList files_; /** * repeated string files = 1; * @return A list containing the files. */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getFilesList() { return files_; } /** * repeated string files = 1; * @return The count of files. */ public int getFilesCount() { return files_.size(); } /** * repeated string files = 1; * @param index The index of the element to return. * @return The files at the given index. */ public java.lang.String getFiles(int index) { return files_.get(index); } /** * repeated string files = 1; * @param index The index of the value to return. * @return The bytes of the files at the given index. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getFilesBytes(int index) { return files_.getByteString(index); } public static final int COOKIE_FIELD_NUMBER = 2; @SuppressWarnings("serial") private volatile java.lang.Object cookie_ = ""; /** * required string cookie = 2; * @return Whether the cookie field is set. */ @java.lang.Override public boolean hasCookie() { return ((bitField0_ & 0x00000001) != 0); } /** * required string cookie = 2; * @return The cookie. */ @java.lang.Override public java.lang.String getCookie() { java.lang.Object ref = cookie_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { cookie_ = s; } return s; } } /** * required string cookie = 2; * @return The bytes for cookie. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getCookieBytes() { java.lang.Object ref = cookie_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); cookie_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasCookie()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < files_.size(); i++) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, files_.getRaw(i)); } if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, cookie_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; { int dataSize = 0; for (int i = 0; i < files_.size(); i++) { dataSize += computeStringSizeNoTag(files_.getRaw(i)); } size += dataSize; size += 1 * getFilesList().size(); } if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, cookie_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) obj; if (!getFilesList() .equals(other.getFilesList())) return false; if (hasCookie() != other.hasCookie()) return false; if (hasCookie()) { if (!getCookie() .equals(other.getCookie())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getFilesCount() > 0) { hash = (37 * hash) + FILES_FIELD_NUMBER; hash = (53 * hash) + getFilesList().hashCode(); } if (hasCookie()) { hash = (37 * hash) + COOKIE_FIELD_NUMBER; hash = (53 * hash) + getCookie().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Contains a list of paths corresponding to corrupt files and a cookie
     * used for iterative calls to NameNode.listCorruptFileBlocks.
     * 
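     * Illustrative builder usage (a hand-written sketch, not produced by protoc):
     * accumulating file paths from an earlier response before building a combined
     * message. The "previousPage" variable and the path literals are hypothetical.
     *
     *   CorruptFileBlocksProto.Builder b = CorruptFileBlocksProto.newBuilder();
     *   b.mergeFrom(previousPage);                    // copies files and cookie
     *   b.addAllFiles(java.util.Arrays.asList(
     *       "/data/example/a", "/data/example/b"));   // hypothetical paths
     *   b.setCookie(previousPage.getCookie());
     *   CorruptFileBlocksProto combined = b.build();
     *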
* * Protobuf type {@code hadoop.hdfs.CorruptFileBlocksProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.CorruptFileBlocksProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; files_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); cookie_ = ""; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result) { if (((bitField0_ & 0x00000001) != 0)) { files_ = files_.getUnmodifiableView(); bitField0_ = (bitField0_ & ~0x00000001); } result.files_ = files_; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000002) != 0)) { result.cookie_ = cookie_; to_bitField0_ |= 0x00000001; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance()) return this; if (!other.files_.isEmpty()) { if (files_.isEmpty()) { files_ = other.files_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureFilesIsMutable(); files_.addAll(other.files_); } onChanged(); } if (other.hasCookie()) { cookie_ = other.cookie_; bitField0_ |= 0x00000002; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasCookie()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); ensureFilesIsMutable(); files_.add(bs); break; } // case 10 case 18: { cookie_ = input.readBytes(); bitField0_ |= 0x00000002; break; } // case 18 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.LazyStringList files_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; private void ensureFilesIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { files_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(files_); bitField0_ |= 0x00000001; } } /** * repeated string files = 1; * @return A list containing the files. */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getFilesList() { return files_.getUnmodifiableView(); } /** * repeated string files = 1; * @return The count of files. */ public int getFilesCount() { return files_.size(); } /** * repeated string files = 1; * @param index The index of the element to return. * @return The files at the given index. 
*/ public java.lang.String getFiles(int index) { return files_.get(index); } /** * repeated string files = 1; * @param index The index of the value to return. * @return The bytes of the files at the given index. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getFilesBytes(int index) { return files_.getByteString(index); } /** * repeated string files = 1; * @param index The index to set the value at. * @param value The files to set. * @return This builder for chaining. */ public Builder setFiles( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureFilesIsMutable(); files_.set(index, value); onChanged(); return this; } /** * repeated string files = 1; * @param value The files to add. * @return This builder for chaining. */ public Builder addFiles( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureFilesIsMutable(); files_.add(value); onChanged(); return this; } /** * repeated string files = 1; * @param values The files to add. * @return This builder for chaining. */ public Builder addAllFiles( java.lang.Iterable values) { ensureFilesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, files_); onChanged(); return this; } /** * repeated string files = 1; * @return This builder for chaining. */ public Builder clearFiles() { files_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * repeated string files = 1; * @param value The bytes of the files to add. * @return This builder for chaining. */ public Builder addFilesBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureFilesIsMutable(); files_.add(value); onChanged(); return this; } private java.lang.Object cookie_ = ""; /** * required string cookie = 2; * @return Whether the cookie field is set. */ public boolean hasCookie() { return ((bitField0_ & 0x00000002) != 0); } /** * required string cookie = 2; * @return The cookie. */ public java.lang.String getCookie() { java.lang.Object ref = cookie_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { cookie_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string cookie = 2; * @return The bytes for cookie. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getCookieBytes() { java.lang.Object ref = cookie_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); cookie_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string cookie = 2; * @param value The cookie to set. * @return This builder for chaining. */ public Builder setCookie( java.lang.String value) { if (value == null) { throw new NullPointerException(); } cookie_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required string cookie = 2; * @return This builder for chaining. */ public Builder clearCookie() { cookie_ = getDefaultInstance().getCookie(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } /** * required string cookie = 2; * @param value The bytes for cookie to set. * @return This builder for chaining. 
*/ public Builder setCookieBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } cookie_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CorruptFileBlocksProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CorruptFileBlocksProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public CorruptFileBlocksProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface StorageTypesProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageTypesProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; * @return A list containing the storageTypes. */ java.util.List getStorageTypesList(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; * @return The count of storageTypes. */ int getStorageTypesCount(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; * @param index The index of the element to return. * @return The storageTypes at the given index. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index); } /** *
   **
   * A list of storage types. 
   * 
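   * Illustrative usage (a hand-written sketch, not produced by protoc): the
   * repeated enum field holds one StorageTypeProto per entry, accessed through
   * the generated list and index accessors.
   *
   *   StorageTypesProto types = StorageTypesProto.newBuilder()
   *       .addStorageTypes(StorageTypeProto.DISK)
   *       .addStorageTypes(StorageTypeProto.SSD)
   *       .build();
   *   int n = types.getStorageTypesCount();               // 2
   *   StorageTypeProto first = types.getStorageTypes(0);  // DISK
   *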
* * Protobuf type {@code hadoop.hdfs.StorageTypesProto} */ public static final class StorageTypesProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageTypesProto) StorageTypesProtoOrBuilder { private static final long serialVersionUID = 0L; // Use StorageTypesProto.newBuilder() to construct. private StorageTypesProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private StorageTypesProto() { storageTypes_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new StorageTypesProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder.class); } public static final int STORAGETYPES_FIELD_NUMBER = 1; @SuppressWarnings("serial") private java.util.List storageTypes_; private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_converter_ = new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() { public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(from); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } }; /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; * @return A list containing the storageTypes. */ @java.lang.Override public java.util.List getStorageTypesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; * @return The count of storageTypes. */ @java.lang.Override public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; * @param index The index of the element to return. * @return The storageTypes at the given index. 
*/ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_converter_.convert(storageTypes_.get(index)); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < storageTypes_.size(); i++) { output.writeEnum(1, storageTypes_.get(i)); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; { int dataSize = 0; for (int i = 0; i < storageTypes_.size(); i++) { dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSizeNoTag(storageTypes_.get(i)); } size += dataSize; size += 1 * storageTypes_.size(); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) obj; if (!storageTypes_.equals(other.storageTypes_)) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getStorageTypesCount() > 0) { hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER; hash = (53 * hash) + storageTypes_.hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( byte[] 
data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * A list of storage types. 
     * 
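     * Illustrative builder usage (a hand-written sketch, not produced by protoc):
     * bulk-adding and clearing the repeated enum field; the chosen enum values are
     * arbitrary examples.
     *
     *   StorageTypesProto.Builder sb = StorageTypesProto.newBuilder();
     *   sb.addAllStorageTypes(java.util.Arrays.asList(
     *       StorageTypeProto.ARCHIVE, StorageTypeProto.PROVIDED));
     *   sb.clearStorageTypes();                        // back to an empty list
     *   StorageTypesProto empty = sb.build();          // valid: no required fields
     *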
* * Protobuf type {@code hadoop.hdfs.StorageTypesProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageTypesProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result) { if (((bitField0_ & 0x00000001) != 0)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); bitField0_ = (bitField0_ & ~0x00000001); } result.storageTypes_ = storageTypes_; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result) { int from_bitField0_ = bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return 
super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) return this; if (!other.storageTypes_.isEmpty()) { if (storageTypes_.isEmpty()) { storageTypes_ = other.storageTypes_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureStorageTypesIsMutable(); storageTypes_.addAll(other.storageTypes_); } onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(1, tmpRaw); } else { ensureStorageTypesIsMutable(); storageTypes_.add(tmpRaw); } break; } // case 8 case 10: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(1, tmpRaw); } else { ensureStorageTypesIsMutable(); storageTypes_.add(tmpRaw); } } input.popLimit(oldLimit); break; } // case 10 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.util.List storageTypes_ = java.util.Collections.emptyList(); private void ensureStorageTypesIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { storageTypes_ = new java.util.ArrayList(storageTypes_); bitField0_ |= 0x00000001; } } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; * @return A list containing the storageTypes. 
*/ public java.util.List getStorageTypesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; * @return The count of storageTypes. */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; * @param index The index of the element to return. * @return The storageTypes at the given index. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_converter_.convert(storageTypes_.get(index)); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; * @param index The index to set the value at. * @param value The storageTypes to set. * @return This builder for chaining. */ public Builder setStorageTypes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.set(index, value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; * @param value The storageTypes to add. * @return This builder for chaining. */ public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.add(value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; * @param values The storageTypes to add. * @return This builder for chaining. */ public Builder addAllStorageTypes( java.lang.Iterable values) { ensureStorageTypesIsMutable(); for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) { storageTypes_.add(value.getNumber()); } onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; * @return This builder for chaining. 
*/ public Builder clearStorageTypes() { storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypesProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypesProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public StorageTypesProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface BlockStoragePolicyProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockStoragePolicyProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint32 policyId = 1; * @return Whether the policyId field is set. */ boolean hasPolicyId(); /** * required uint32 policyId = 1; * @return The policyId. */ int getPolicyId(); /** * required string name = 2; * @return Whether the name field is set. */ boolean hasName(); /** * required string name = 2; * @return The name. */ java.lang.String getName(); /** * required string name = 2; * @return The bytes for name. */ org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes(); /** *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * @return Whether the creationPolicy field is set. */ boolean hasCreationPolicy(); /** *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * @return The creationPolicy. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy(); /** *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder(); /** *
     * A list of storage types for creation fallback storage.
     * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * @return Whether the creationFallbackPolicy field is set. */ boolean hasCreationFallbackPolicy(); /** *
     * A list of storage types for creation fallback storage.
     * 
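      * 
      * Illustrative sketch, not part of the generated code: this field is optional, so callers
      * should check the has-accessor before trusting the value; when unset, the getter returns
      * the StorageTypesProto default instance (an empty storage type list). Here {@code policy}
      * is assumed to be some BlockStoragePolicyProtoOrBuilder instance.
      * 
      *   if (policy.hasCreationFallbackPolicy()) {
      *     // only counts types that were explicitly configured for creation fallback
      *     int fallbackTypes = policy.getCreationFallbackPolicy().getStorageTypesCount();
      *   }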
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * @return The creationFallbackPolicy. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy(); /** *
     * A list of storage types for creation fallback storage.
     * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder(); /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; * @return Whether the replicationFallbackPolicy field is set. */ boolean hasReplicationFallbackPolicy(); /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; * @return The replicationFallbackPolicy. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy(); /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder(); } /** *
   **
   * Block replica storage policy.
   * 
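    * 
    * Illustrative sketch, not part of the generated code: building a policy with its three
    * required fields (policyId, name, creationPolicy) plus an optional fallback. The id, name
    * and storage types are arbitrary example values, and {@code HdfsProtos} is assumed to be
    * an import of org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.
    * 
    *   HdfsProtos.StorageTypesProto creation = HdfsProtos.StorageTypesProto.newBuilder()
    *       .addStorageTypes(HdfsProtos.StorageTypeProto.DISK)   // repeated enum field
    *       .build();
    *   HdfsProtos.BlockStoragePolicyProto policy = HdfsProtos.BlockStoragePolicyProto.newBuilder()
    *       .setPolicyId(7)                        // required uint32
    *       .setName("EXAMPLE")                    // required string
    *       .setCreationPolicy(creation)           // required nested message
    *       .setCreationFallbackPolicy(HdfsProtos.StorageTypesProto.newBuilder()
    *           .addStorageTypes(HdfsProtos.StorageTypeProto.ARCHIVE))  // optional field
    *       .build();                              // throws if any required field is missing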
* * Protobuf type {@code hadoop.hdfs.BlockStoragePolicyProto} */ public static final class BlockStoragePolicyProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockStoragePolicyProto) BlockStoragePolicyProtoOrBuilder { private static final long serialVersionUID = 0L; // Use BlockStoragePolicyProto.newBuilder() to construct. private BlockStoragePolicyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private BlockStoragePolicyProto() { name_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new BlockStoragePolicyProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder.class); } private int bitField0_; public static final int POLICYID_FIELD_NUMBER = 1; private int policyId_ = 0; /** * required uint32 policyId = 1; * @return Whether the policyId field is set. */ @java.lang.Override public boolean hasPolicyId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint32 policyId = 1; * @return The policyId. */ @java.lang.Override public int getPolicyId() { return policyId_; } public static final int NAME_FIELD_NUMBER = 2; @SuppressWarnings("serial") private volatile java.lang.Object name_ = ""; /** * required string name = 2; * @return Whether the name field is set. */ @java.lang.Override public boolean hasName() { return ((bitField0_ & 0x00000002) != 0); } /** * required string name = 2; * @return The name. */ @java.lang.Override public java.lang.String getName() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } } /** * required string name = 2; * @return The bytes for name. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int CREATIONPOLICY_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationPolicy_; /** *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * @return Whether the creationPolicy field is set. */ @java.lang.Override public boolean hasCreationPolicy() { return ((bitField0_ & 0x00000004) != 0); } /** *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * @return The creationPolicy. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy() { return creationPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationPolicy_; } /** *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder() { return creationPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationPolicy_; } public static final int CREATIONFALLBACKPOLICY_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationFallbackPolicy_; /** *
     * A list of storage types for creation fallback storage.
     * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * @return Whether the creationFallbackPolicy field is set. */ @java.lang.Override public boolean hasCreationFallbackPolicy() { return ((bitField0_ & 0x00000008) != 0); } /** *
     * A list of storage types for creation fallback storage.
     * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * @return The creationFallbackPolicy. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy() { return creationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationFallbackPolicy_; } /** *
     * A list of storage types for creation fallback storage.
     * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder() { return creationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationFallbackPolicy_; } public static final int REPLICATIONFALLBACKPOLICY_FIELD_NUMBER = 5; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto replicationFallbackPolicy_; /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; * @return Whether the replicationFallbackPolicy field is set. */ @java.lang.Override public boolean hasReplicationFallbackPolicy() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; * @return The replicationFallbackPolicy. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy() { return replicationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : replicationFallbackPolicy_; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder() { return replicationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : replicationFallbackPolicy_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasPolicyId()) { memoizedIsInitialized = 0; return false; } if (!hasName()) { memoizedIsInitialized = 0; return false; } if (!hasCreationPolicy()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, policyId_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, name_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeMessage(3, getCreationPolicy()); } if (((bitField0_ & 0x00000008) != 0)) { output.writeMessage(4, getCreationFallbackPolicy()); } if (((bitField0_ & 0x00000010) != 0)) { output.writeMessage(5, getReplicationFallbackPolicy()); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, policyId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, name_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(3, getCreationPolicy()); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, getCreationFallbackPolicy()); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(5, 
getReplicationFallbackPolicy()); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) obj; if (hasPolicyId() != other.hasPolicyId()) return false; if (hasPolicyId()) { if (getPolicyId() != other.getPolicyId()) return false; } if (hasName() != other.hasName()) return false; if (hasName()) { if (!getName() .equals(other.getName())) return false; } if (hasCreationPolicy() != other.hasCreationPolicy()) return false; if (hasCreationPolicy()) { if (!getCreationPolicy() .equals(other.getCreationPolicy())) return false; } if (hasCreationFallbackPolicy() != other.hasCreationFallbackPolicy()) return false; if (hasCreationFallbackPolicy()) { if (!getCreationFallbackPolicy() .equals(other.getCreationFallbackPolicy())) return false; } if (hasReplicationFallbackPolicy() != other.hasReplicationFallbackPolicy()) return false; if (hasReplicationFallbackPolicy()) { if (!getReplicationFallbackPolicy() .equals(other.getReplicationFallbackPolicy())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasPolicyId()) { hash = (37 * hash) + POLICYID_FIELD_NUMBER; hash = (53 * hash) + getPolicyId(); } if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } if (hasCreationPolicy()) { hash = (37 * hash) + CREATIONPOLICY_FIELD_NUMBER; hash = (53 * hash) + getCreationPolicy().hashCode(); } if (hasCreationFallbackPolicy()) { hash = (37 * hash) + CREATIONFALLBACKPOLICY_FIELD_NUMBER; hash = (53 * hash) + getCreationFallbackPolicy().hashCode(); } if (hasReplicationFallbackPolicy()) { hash = (37 * hash) + REPLICATIONFALLBACKPOLICY_FIELD_NUMBER; hash = (53 * hash) + getReplicationFallbackPolicy().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, 
extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Block replica storage policy.
     * 
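      * 
      * Illustrative sketch, not part of the generated code: a wire round trip through this
      * builder's message type, assuming {@code HdfsProtos} imports
      * org.apache.hadoop.hdfs.protocol.proto.HdfsProtos and {@code policy} is an
      * already-built BlockStoragePolicyProto.
      * 
      *   byte[] wire = policy.toByteArray();
      *   HdfsProtos.BlockStoragePolicyProto parsed =
      *       HdfsProtos.BlockStoragePolicyProto.parseFrom(wire);
      *   // parseFrom rejects a payload that lacks policyId, name or creationPolicy,
      *   // since those fields are declared required.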
* * Protobuf type {@code hadoop.hdfs.BlockStoragePolicyProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockStoragePolicyProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getCreationPolicyFieldBuilder(); getCreationFallbackPolicyFieldBuilder(); getReplicationFallbackPolicyFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; policyId_ = 0; name_ = ""; creationPolicy_ = null; if (creationPolicyBuilder_ != null) { creationPolicyBuilder_.dispose(); creationPolicyBuilder_ = null; } creationFallbackPolicy_ = null; if (creationFallbackPolicyBuilder_ != null) { creationFallbackPolicyBuilder_.dispose(); creationFallbackPolicyBuilder_ = null; } replicationFallbackPolicy_ = null; if (replicationFallbackPolicyBuilder_ != null) { replicationFallbackPolicyBuilder_.dispose(); replicationFallbackPolicyBuilder_ = null; } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { 
result.policyId_ = policyId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.name_ = name_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.creationPolicy_ = creationPolicyBuilder_ == null ? creationPolicy_ : creationPolicyBuilder_.build(); to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.creationFallbackPolicy_ = creationFallbackPolicyBuilder_ == null ? creationFallbackPolicy_ : creationFallbackPolicyBuilder_.build(); to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.replicationFallbackPolicy_ = replicationFallbackPolicyBuilder_ == null ? replicationFallbackPolicy_ : replicationFallbackPolicyBuilder_.build(); to_bitField0_ |= 0x00000010; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance()) return this; if (other.hasPolicyId()) { setPolicyId(other.getPolicyId()); } if (other.hasName()) { name_ = other.name_; bitField0_ |= 0x00000002; onChanged(); } if (other.hasCreationPolicy()) { mergeCreationPolicy(other.getCreationPolicy()); } if (other.hasCreationFallbackPolicy()) { mergeCreationFallbackPolicy(other.getCreationFallbackPolicy()); } if (other.hasReplicationFallbackPolicy()) { mergeReplicationFallbackPolicy(other.getReplicationFallbackPolicy()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasPolicyId()) { return false; } if (!hasName()) { return false; } if (!hasCreationPolicy()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { policyId_ = input.readUInt32(); 
bitField0_ |= 0x00000001; break; } // case 8 case 18: { name_ = input.readBytes(); bitField0_ |= 0x00000002; break; } // case 18 case 26: { input.readMessage( getCreationPolicyFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000004; break; } // case 26 case 34: { input.readMessage( getCreationFallbackPolicyFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000008; break; } // case 34 case 42: { input.readMessage( getReplicationFallbackPolicyFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000010; break; } // case 42 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private int policyId_ ; /** * required uint32 policyId = 1; * @return Whether the policyId field is set. */ @java.lang.Override public boolean hasPolicyId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint32 policyId = 1; * @return The policyId. */ @java.lang.Override public int getPolicyId() { return policyId_; } /** * required uint32 policyId = 1; * @param value The policyId to set. * @return This builder for chaining. */ public Builder setPolicyId(int value) { policyId_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required uint32 policyId = 1; * @return This builder for chaining. */ public Builder clearPolicyId() { bitField0_ = (bitField0_ & ~0x00000001); policyId_ = 0; onChanged(); return this; } private java.lang.Object name_ = ""; /** * required string name = 2; * @return Whether the name field is set. */ public boolean hasName() { return ((bitField0_ & 0x00000002) != 0); } /** * required string name = 2; * @return The name. */ public java.lang.String getName() { java.lang.Object ref = name_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string name = 2; * @return The bytes for name. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string name = 2; * @param value The name to set. * @return This builder for chaining. */ public Builder setName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } name_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required string name = 2; * @return This builder for chaining. */ public Builder clearName() { name_ = getDefaultInstance().getName(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } /** * required string name = 2; * @param value The bytes for name to set. * @return This builder for chaining. 
*/ public Builder setNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } name_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationPolicy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> creationPolicyBuilder_; /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * @return Whether the creationPolicy field is set. */ public boolean hasCreationPolicy() { return ((bitField0_ & 0x00000004) != 0); } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * @return The creationPolicy. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy() { if (creationPolicyBuilder_ == null) { return creationPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationPolicy_; } else { return creationPolicyBuilder_.getMessage(); } } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public Builder setCreationPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (creationPolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } creationPolicy_ = value; } else { creationPolicyBuilder_.setMessage(value); } bitField0_ |= 0x00000004; onChanged(); return this; } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public Builder setCreationPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) { if (creationPolicyBuilder_ == null) { creationPolicy_ = builderForValue.build(); } else { creationPolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; onChanged(); return this; } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public Builder mergeCreationPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (creationPolicyBuilder_ == null) { if (((bitField0_ & 0x00000004) != 0) && creationPolicy_ != null && creationPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) { getCreationPolicyBuilder().mergeFrom(value); } else { creationPolicy_ = value; } } else { creationPolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; onChanged(); return this; } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public Builder clearCreationPolicy() { bitField0_ = (bitField0_ & ~0x00000004); creationPolicy_ = null; if (creationPolicyBuilder_ != null) { creationPolicyBuilder_.dispose(); creationPolicyBuilder_ = null; } onChanged(); return this; } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
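        * 
        * Illustrative sketch, not part of the generated code: the nested builder returned below
        * edits the field in place, so a caller can extend the creation policy without rebuilding
        * it. {@code builder} is assumed to be a BlockStoragePolicyProto.Builder and DISK is just
        * an example value.
        * 
        *   builder.getCreationPolicyBuilder()
        *       .addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK);
        *   // has the same effect as building a StorageTypesProto separately and calling setCreationPolicy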
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getCreationPolicyBuilder() { bitField0_ |= 0x00000004; onChanged(); return getCreationPolicyFieldBuilder().getBuilder(); } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder() { if (creationPolicyBuilder_ != null) { return creationPolicyBuilder_.getMessageOrBuilder(); } else { return creationPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationPolicy_; } } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> getCreationPolicyFieldBuilder() { if (creationPolicyBuilder_ == null) { creationPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>( getCreationPolicy(), getParentForChildren(), isClean()); creationPolicy_ = null; } return creationPolicyBuilder_; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationFallbackPolicy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> creationFallbackPolicyBuilder_; /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * @return Whether the creationFallbackPolicy field is set. */ public boolean hasCreationFallbackPolicy() { return ((bitField0_ & 0x00000008) != 0); } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * @return The creationFallbackPolicy. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy() { if (creationFallbackPolicyBuilder_ == null) { return creationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationFallbackPolicy_; } else { return creationFallbackPolicyBuilder_.getMessage(); } } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public Builder setCreationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (creationFallbackPolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } creationFallbackPolicy_ = value; } else { creationFallbackPolicyBuilder_.setMessage(value); } bitField0_ |= 0x00000008; onChanged(); return this; } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public Builder setCreationFallbackPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) { if (creationFallbackPolicyBuilder_ == null) { creationFallbackPolicy_ = builderForValue.build(); } else { creationFallbackPolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; onChanged(); return this; } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public Builder mergeCreationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (creationFallbackPolicyBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0) && creationFallbackPolicy_ != null && creationFallbackPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) { getCreationFallbackPolicyBuilder().mergeFrom(value); } else { creationFallbackPolicy_ = value; } } else { creationFallbackPolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; onChanged(); return this; } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public Builder clearCreationFallbackPolicy() { bitField0_ = (bitField0_ & ~0x00000008); creationFallbackPolicy_ = null; if (creationFallbackPolicyBuilder_ != null) { creationFallbackPolicyBuilder_.dispose(); creationFallbackPolicyBuilder_ = null; } onChanged(); return this; } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getCreationFallbackPolicyBuilder() { bitField0_ |= 0x00000008; onChanged(); return getCreationFallbackPolicyFieldBuilder().getBuilder(); } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder() { if (creationFallbackPolicyBuilder_ != null) { return creationFallbackPolicyBuilder_.getMessageOrBuilder(); } else { return creationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationFallbackPolicy_; } } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> getCreationFallbackPolicyFieldBuilder() { if (creationFallbackPolicyBuilder_ == null) { creationFallbackPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>( getCreationFallbackPolicy(), getParentForChildren(), isClean()); creationFallbackPolicy_ = null; } return creationFallbackPolicyBuilder_; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto replicationFallbackPolicy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> replicationFallbackPolicyBuilder_; /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; * @return Whether the replicationFallbackPolicy field is set. */ public boolean hasReplicationFallbackPolicy() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; * @return The replicationFallbackPolicy. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy() { if (replicationFallbackPolicyBuilder_ == null) { return replicationFallbackPolicy_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : replicationFallbackPolicy_; } else { return replicationFallbackPolicyBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public Builder setReplicationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (replicationFallbackPolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } replicationFallbackPolicy_ = value; } else { replicationFallbackPolicyBuilder_.setMessage(value); } bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public Builder setReplicationFallbackPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) { if (replicationFallbackPolicyBuilder_ == null) { replicationFallbackPolicy_ = builderForValue.build(); } else { replicationFallbackPolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public Builder mergeReplicationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (replicationFallbackPolicyBuilder_ == null) { if (((bitField0_ & 0x00000010) != 0) && replicationFallbackPolicy_ != null && replicationFallbackPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) { getReplicationFallbackPolicyBuilder().mergeFrom(value); } else { replicationFallbackPolicy_ = value; } } else { replicationFallbackPolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public Builder clearReplicationFallbackPolicy() { bitField0_ = (bitField0_ & ~0x00000010); replicationFallbackPolicy_ = null; if (replicationFallbackPolicyBuilder_ != null) { replicationFallbackPolicyBuilder_.dispose(); replicationFallbackPolicyBuilder_ = null; } onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getReplicationFallbackPolicyBuilder() { bitField0_ |= 0x00000010; onChanged(); return getReplicationFallbackPolicyFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder() { if (replicationFallbackPolicyBuilder_ != null) { return replicationFallbackPolicyBuilder_.getMessageOrBuilder(); } else { return replicationFallbackPolicy_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : replicationFallbackPolicy_; } } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> getReplicationFallbackPolicyFieldBuilder() { if (replicationFallbackPolicyBuilder_ == null) { replicationFallbackPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>( getReplicationFallbackPolicy(), getParentForChildren(), isClean()); replicationFallbackPolicy_ = null; } return replicationFallbackPolicyBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockStoragePolicyProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockStoragePolicyProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public BlockStoragePolicyProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface LocatedBlockProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.LocatedBlockProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required 
.hadoop.hdfs.ExtendedBlockProto b = 1; * @return Whether the b field is set. */ boolean hasB(); /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; * @return The b. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB(); /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder(); /** *
     * offset of first byte of block in the file
     * 
* * required uint64 offset = 2; * @return Whether the offset field is set. */ boolean hasOffset(); /** *
     * offset of first byte of block in the file
     * 
* * required uint64 offset = 2; * @return The offset. */ long getOffset(); /** *
     * Locations ordered by proximity to client ip
     * 
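      * 
      * Illustrative sketch, not part of the generated code: because the locations are ordered by
      * proximity to the client, a reader that needs a single replica can simply take the first
      * entry. {@code block} is assumed to be a LocatedBlockProtoOrBuilder.
      * 
      *   if (block.getLocsCount() > 0) {
      *     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto nearest = block.getLocs(0);
      *   }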
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getLocsList(); /** *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index); /** *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ int getLocsCount(); /** *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getLocsOrBuilderList(); /** *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder( int index); /** *
     * true if all replicas of a block are corrupt, else false
     * 
* * required bool corrupt = 4; * @return Whether the corrupt field is set. */ boolean hasCorrupt(); /** *
     * true if all replicas of a block are corrupt, else false
     * 
* * required bool corrupt = 4; * @return The corrupt. */ boolean getCorrupt(); /** * required .hadoop.common.TokenProto blockToken = 5; * @return Whether the blockToken field is set. */ boolean hasBlockToken(); /** * required .hadoop.common.TokenProto blockToken = 5; * @return The blockToken. */ org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken(); /** * required .hadoop.common.TokenProto blockToken = 5; */ org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder(); /** *
     * if a location in locs is cached
     * 
* * repeated bool isCached = 6 [packed = true]; * @return A list containing the isCached. */ java.util.List<java.lang.Boolean> getIsCachedList(); /** *
     * if a location in locs is cached
     * 
* * repeated bool isCached = 6 [packed = true]; * @return The count of isCached. */ int getIsCachedCount(); /** *
     * if a location in locs is cached
     * 
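      * 
      * Illustrative sketch, not part of the generated code, assuming each isCached entry refers by
      * index to the location at the same position in locs (as the description above suggests);
      * {@code block} is a LocatedBlockProtoOrBuilder.
      * 
      *   for (int i = 0; i < block.getIsCachedCount(); i++) {
      *     if (block.getIsCached(i)) {
      *       // the replica at block.getLocs(i) is reported as cached
      *     }
      *   }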
* * repeated bool isCached = 6 [packed = true]; * @param index The index of the element to return. * @return The isCached at the given index. */ boolean getIsCached(int index); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return A list containing the storageTypes. */ java.util.List getStorageTypesList(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return The count of storageTypes. */ int getStorageTypesCount(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @param index The index of the element to return. * @return The storageTypes at the given index. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index); /** * repeated string storageIDs = 8; * @return A list containing the storageIDs. */ java.util.List getStorageIDsList(); /** * repeated string storageIDs = 8; * @return The count of storageIDs. */ int getStorageIDsCount(); /** * repeated string storageIDs = 8; * @param index The index of the element to return. * @return The storageIDs at the given index. */ java.lang.String getStorageIDs(int index); /** * repeated string storageIDs = 8; * @param index The index of the value to return. * @return The bytes of the storageIDs at the given index. */ org.apache.hadoop.thirdparty.protobuf.ByteString getStorageIDsBytes(int index); /** *
     * striped block related fields
     * 
* * optional bytes blockIndices = 9; * @return Whether the blockIndices field is set. */ boolean hasBlockIndices(); /** *
     * striped block related fields
     * 
* * optional bytes blockIndices = 9; * @return The blockIndices. */ org.apache.hadoop.thirdparty.protobuf.ByteString getBlockIndices(); /** *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getBlockTokensList(); /** *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index); /** *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ int getBlockTokensCount(); /** *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> getBlockTokensOrBuilderList(); /** *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder( int index); } /** *
   **
   * A LocatedBlock gives information about a block and its location.
   * 
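    * A minimal construction sketch using only the required fields (b, offset, corrupt,
    * blockToken). The nested ExtendedBlockProto and TokenProto setters are assumed from
    * their own generated messages, and every value below is illustrative:
    * {@code
    *   HdfsProtos.LocatedBlockProto block = HdfsProtos.LocatedBlockProto.newBuilder()
    *       .setB(HdfsProtos.ExtendedBlockProto.newBuilder()
    *           .setPoolId("example-pool")       // assumed pool id
    *           .setBlockId(1L)
    *           .setGenerationStamp(1L))
    *       .setOffset(0L)
    *       .setCorrupt(false)
    *       .setBlockToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto.newBuilder()
    *           .setIdentifier(org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY)
    *           .setPassword(org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY)
    *           .setKind("")
    *           .setService(""))
    *       .build();
    * }
    * 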
* * Protobuf type {@code hadoop.hdfs.LocatedBlockProto} */ public static final class LocatedBlockProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.LocatedBlockProto) LocatedBlockProtoOrBuilder { private static final long serialVersionUID = 0L; // Use LocatedBlockProto.newBuilder() to construct. private LocatedBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private LocatedBlockProto() { locs_ = java.util.Collections.emptyList(); isCached_ = emptyBooleanList(); storageTypes_ = java.util.Collections.emptyList(); storageIDs_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; blockIndices_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; blockTokens_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new LocatedBlockProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class); } private int bitField0_; public static final int B_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_; /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; * @return Whether the b field is set. */ @java.lang.Override public boolean hasB() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; * @return The b. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() { return b_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : b_; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() { return b_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : b_; } public static final int OFFSET_FIELD_NUMBER = 2; private long offset_ = 0L; /** *
     * offset of first byte of block in the file
     * 
* * required uint64 offset = 2; * @return Whether the offset field is set. */ @java.lang.Override public boolean hasOffset() { return ((bitField0_ & 0x00000002) != 0); } /** *
     * offset of first byte of block in the file
     * 
* * required uint64 offset = 2; * @return The offset. */ @java.lang.Override public long getOffset() { return offset_; } public static final int LOCS_FIELD_NUMBER = 3; @SuppressWarnings("serial") private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> locs_; /** *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ @java.lang.Override public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getLocsList() { return locs_; } /** *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ @java.lang.Override public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getLocsOrBuilderList() { return locs_; } /** *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ @java.lang.Override public int getLocsCount() { return locs_.size(); } /** *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) { return locs_.get(index); } /** *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder( int index) { return locs_.get(index); } public static final int CORRUPT_FIELD_NUMBER = 4; private boolean corrupt_ = false; /** *
     * true if all replicas of a block are corrupt, else false
     * 
* * required bool corrupt = 4; * @return Whether the corrupt field is set. */ @java.lang.Override public boolean hasCorrupt() { return ((bitField0_ & 0x00000004) != 0); } /** *
     * true if all replicas of a block are corrupt, else false
     * 
* * required bool corrupt = 4; * @return The corrupt. */ @java.lang.Override public boolean getCorrupt() { return corrupt_; } public static final int BLOCKTOKEN_FIELD_NUMBER = 5; private org.apache.hadoop.security.proto.SecurityProtos.TokenProto blockToken_; /** * required .hadoop.common.TokenProto blockToken = 5; * @return Whether the blockToken field is set. */ @java.lang.Override public boolean hasBlockToken() { return ((bitField0_ & 0x00000008) != 0); } /** * required .hadoop.common.TokenProto blockToken = 5; * @return The blockToken. */ @java.lang.Override public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken() { return blockToken_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : blockToken_; } /** * required .hadoop.common.TokenProto blockToken = 5; */ @java.lang.Override public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder() { return blockToken_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : blockToken_; } public static final int ISCACHED_FIELD_NUMBER = 6; @SuppressWarnings("serial") private org.apache.hadoop.thirdparty.protobuf.Internal.BooleanList isCached_; /** *
     * if a location in locs is cached
     * 
* * repeated bool isCached = 6 [packed = true]; * @return A list containing the isCached. */ @java.lang.Override public java.util.List<java.lang.Boolean> getIsCachedList() { return isCached_; } /** *
     * if a location in locs is cached
     * 
* * repeated bool isCached = 6 [packed = true]; * @return The count of isCached. */ public int getIsCachedCount() { return isCached_.size(); } /** *
     * if a location in locs is cached
     * 
* * repeated bool isCached = 6 [packed = true]; * @param index The index of the element to return. * @return The isCached at the given index. */ public boolean getIsCached(int index) { return isCached_.getBoolean(index); } private int isCachedMemoizedSerializedSize = -1; public static final int STORAGETYPES_FIELD_NUMBER = 7; @SuppressWarnings("serial") private java.util.List storageTypes_; private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_converter_ = new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() { public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(from); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } }; /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return A list containing the storageTypes. */ @java.lang.Override public java.util.List getStorageTypesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return The count of storageTypes. */ @java.lang.Override public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @param index The index of the element to return. * @return The storageTypes at the given index. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_converter_.convert(storageTypes_.get(index)); } public static final int STORAGEIDS_FIELD_NUMBER = 8; @SuppressWarnings("serial") private org.apache.hadoop.thirdparty.protobuf.LazyStringList storageIDs_; /** * repeated string storageIDs = 8; * @return A list containing the storageIDs. */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getStorageIDsList() { return storageIDs_; } /** * repeated string storageIDs = 8; * @return The count of storageIDs. */ public int getStorageIDsCount() { return storageIDs_.size(); } /** * repeated string storageIDs = 8; * @param index The index of the element to return. * @return The storageIDs at the given index. */ public java.lang.String getStorageIDs(int index) { return storageIDs_.get(index); } /** * repeated string storageIDs = 8; * @param index The index of the value to return. * @return The bytes of the storageIDs at the given index. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageIDsBytes(int index) { return storageIDs_.getByteString(index); } public static final int BLOCKINDICES_FIELD_NUMBER = 9; private org.apache.hadoop.thirdparty.protobuf.ByteString blockIndices_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** *
     * striped block related fields
     * 
* * optional bytes blockIndices = 9; * @return Whether the blockIndices field is set. */ @java.lang.Override public boolean hasBlockIndices() { return ((bitField0_ & 0x00000010) != 0); } /** *
     * striped block related fields
     * 
* * optional bytes blockIndices = 9; * @return The blockIndices. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockIndices() { return blockIndices_; } public static final int BLOCKTOKENS_FIELD_NUMBER = 10; @SuppressWarnings("serial") private java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> blockTokens_; /** *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ @java.lang.Override public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getBlockTokensList() { return blockTokens_; } /** *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ @java.lang.Override public java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> getBlockTokensOrBuilderList() { return blockTokens_; } /** *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ @java.lang.Override public int getBlockTokensCount() { return blockTokens_.size(); } /** *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ @java.lang.Override public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) { return blockTokens_.get(index); } /** *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ @java.lang.Override public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder( int index) { return blockTokens_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasB()) { memoizedIsInitialized = 0; return false; } if (!hasOffset()) { memoizedIsInitialized = 0; return false; } if (!hasCorrupt()) { memoizedIsInitialized = 0; return false; } if (!hasBlockToken()) { memoizedIsInitialized = 0; return false; } if (!getB().isInitialized()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getLocsCount(); i++) { if (!getLocs(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } if (!getBlockToken().isInitialized()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getBlockTokensCount(); i++) { if (!getBlockTokens(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getB()); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, offset_); } for (int i = 0; i < locs_.size(); i++) { output.writeMessage(3, locs_.get(i)); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBool(4, corrupt_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeMessage(5, getBlockToken()); } if (getIsCachedList().size() > 0) { output.writeUInt32NoTag(50); output.writeUInt32NoTag(isCachedMemoizedSerializedSize); } for (int i = 0; i < isCached_.size(); i++) { output.writeBoolNoTag(isCached_.getBoolean(i)); } for (int i = 0; i < storageTypes_.size(); i++) { output.writeEnum(7, storageTypes_.get(i)); } for (int i = 0; i < storageIDs_.size(); i++) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 8, storageIDs_.getRaw(i)); } if (((bitField0_ & 0x00000010) != 0)) { output.writeBytes(9, blockIndices_); } for (int i = 0; i < blockTokens_.size(); i++) { output.writeMessage(10, blockTokens_.get(i)); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getB()); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, offset_); } for (int i = 0; i < locs_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(3, locs_.get(i)); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(4, corrupt_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(5, getBlockToken()); } { int dataSize = 0; dataSize = 1 * getIsCachedList().size(); size += dataSize; if (!getIsCachedList().isEmpty()) { size += 1; size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt32SizeNoTag(dataSize); } isCachedMemoizedSerializedSize = dataSize; } { int dataSize = 0; for (int i = 0; i < storageTypes_.size(); i++) { dataSize += 
org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSizeNoTag(storageTypes_.get(i)); } size += dataSize; size += 1 * storageTypes_.size(); } { int dataSize = 0; for (int i = 0; i < storageIDs_.size(); i++) { dataSize += computeStringSizeNoTag(storageIDs_.getRaw(i)); } size += dataSize; size += 1 * getStorageIDsList().size(); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(9, blockIndices_); } for (int i = 0; i < blockTokens_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(10, blockTokens_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) obj; if (hasB() != other.hasB()) return false; if (hasB()) { if (!getB() .equals(other.getB())) return false; } if (hasOffset() != other.hasOffset()) return false; if (hasOffset()) { if (getOffset() != other.getOffset()) return false; } if (!getLocsList() .equals(other.getLocsList())) return false; if (hasCorrupt() != other.hasCorrupt()) return false; if (hasCorrupt()) { if (getCorrupt() != other.getCorrupt()) return false; } if (hasBlockToken() != other.hasBlockToken()) return false; if (hasBlockToken()) { if (!getBlockToken() .equals(other.getBlockToken())) return false; } if (!getIsCachedList() .equals(other.getIsCachedList())) return false; if (!storageTypes_.equals(other.storageTypes_)) return false; if (!getStorageIDsList() .equals(other.getStorageIDsList())) return false; if (hasBlockIndices() != other.hasBlockIndices()) return false; if (hasBlockIndices()) { if (!getBlockIndices() .equals(other.getBlockIndices())) return false; } if (!getBlockTokensList() .equals(other.getBlockTokensList())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasB()) { hash = (37 * hash) + B_FIELD_NUMBER; hash = (53 * hash) + getB().hashCode(); } if (hasOffset()) { hash = (37 * hash) + OFFSET_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getOffset()); } if (getLocsCount() > 0) { hash = (37 * hash) + LOCS_FIELD_NUMBER; hash = (53 * hash) + getLocsList().hashCode(); } if (hasCorrupt()) { hash = (37 * hash) + CORRUPT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getCorrupt()); } if (hasBlockToken()) { hash = (37 * hash) + BLOCKTOKEN_FIELD_NUMBER; hash = (53 * hash) + getBlockToken().hashCode(); } if (getIsCachedCount() > 0) { hash = (37 * hash) + ISCACHED_FIELD_NUMBER; hash = (53 * hash) + getIsCachedList().hashCode(); } if (getStorageTypesCount() > 0) { hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER; hash = (53 * hash) + storageTypes_.hashCode(); } if (getStorageIDsCount() > 0) { hash = (37 * hash) + STORAGEIDS_FIELD_NUMBER; hash = (53 * hash) + getStorageIDsList().hashCode(); } if (hasBlockIndices()) { hash = (37 * hash) + BLOCKINDICES_FIELD_NUMBER; hash = (53 * hash) + getBlockIndices().hashCode(); } if 
(getBlockTokensCount() > 0) { hash = (37 * hash) + BLOCKTOKENS_FIELD_NUMBER; hash = (53 * hash) + getBlockTokensList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * A LocatedBlock gives information about a block and its location.
     * 
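      * Round-trip sketch (illustrative; {@code toByteArray()} is inherited from the
      * shaded protobuf message base class rather than declared in this file):
      * {@code
      *   byte[] bytes = block.toByteArray();
      *   HdfsProtos.LocatedBlockProto copy = HdfsProtos.LocatedBlockProto.parseFrom(bytes);
      *   HdfsProtos.LocatedBlockProto.Builder edited = copy.toBuilder().setCorrupt(true);
      * }
      * 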
* * Protobuf type {@code hadoop.hdfs.LocatedBlockProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.LocatedBlockProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getBFieldBuilder(); getLocsFieldBuilder(); getBlockTokenFieldBuilder(); getBlockTokensFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; b_ = null; if (bBuilder_ != null) { bBuilder_.dispose(); bBuilder_ = null; } offset_ = 0L; if (locsBuilder_ == null) { locs_ = java.util.Collections.emptyList(); } else { locs_ = null; locsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); corrupt_ = false; blockToken_ = null; if (blockTokenBuilder_ != null) { blockTokenBuilder_.dispose(); blockTokenBuilder_ = null; } isCached_ = emptyBooleanList(); storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000040); storageIDs_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000080); blockIndices_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; if (blockTokensBuilder_ == null) { blockTokens_ = java.util.Collections.emptyList(); } else { blockTokens_ = null; blockTokensBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000200); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto(this); 
buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result) { if (locsBuilder_ == null) { if (((bitField0_ & 0x00000004) != 0)) { locs_ = java.util.Collections.unmodifiableList(locs_); bitField0_ = (bitField0_ & ~0x00000004); } result.locs_ = locs_; } else { result.locs_ = locsBuilder_.build(); } if (((bitField0_ & 0x00000020) != 0)) { isCached_.makeImmutable(); bitField0_ = (bitField0_ & ~0x00000020); } result.isCached_ = isCached_; if (((bitField0_ & 0x00000040) != 0)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); bitField0_ = (bitField0_ & ~0x00000040); } result.storageTypes_ = storageTypes_; if (((bitField0_ & 0x00000080) != 0)) { storageIDs_ = storageIDs_.getUnmodifiableView(); bitField0_ = (bitField0_ & ~0x00000080); } result.storageIDs_ = storageIDs_; if (blockTokensBuilder_ == null) { if (((bitField0_ & 0x00000200) != 0)) { blockTokens_ = java.util.Collections.unmodifiableList(blockTokens_); bitField0_ = (bitField0_ & ~0x00000200); } result.blockTokens_ = blockTokens_; } else { result.blockTokens_ = blockTokensBuilder_.build(); } } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.b_ = bBuilder_ == null ? b_ : bBuilder_.build(); to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.offset_ = offset_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000008) != 0)) { result.corrupt_ = corrupt_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000010) != 0)) { result.blockToken_ = blockTokenBuilder_ == null ? 
blockToken_ : blockTokenBuilder_.build(); to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000100) != 0)) { result.blockIndices_ = blockIndices_; to_bitField0_ |= 0x00000010; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) return this; if (other.hasB()) { mergeB(other.getB()); } if (other.hasOffset()) { setOffset(other.getOffset()); } if (locsBuilder_ == null) { if (!other.locs_.isEmpty()) { if (locs_.isEmpty()) { locs_ = other.locs_; bitField0_ = (bitField0_ & ~0x00000004); } else { ensureLocsIsMutable(); locs_.addAll(other.locs_); } onChanged(); } } else { if (!other.locs_.isEmpty()) { if (locsBuilder_.isEmpty()) { locsBuilder_.dispose(); locsBuilder_ = null; locs_ = other.locs_; bitField0_ = (bitField0_ & ~0x00000004); locsBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getLocsFieldBuilder() : null; } else { locsBuilder_.addAllMessages(other.locs_); } } } if (other.hasCorrupt()) { setCorrupt(other.getCorrupt()); } if (other.hasBlockToken()) { mergeBlockToken(other.getBlockToken()); } if (!other.isCached_.isEmpty()) { if (isCached_.isEmpty()) { isCached_ = other.isCached_; bitField0_ = (bitField0_ & ~0x00000020); } else { ensureIsCachedIsMutable(); isCached_.addAll(other.isCached_); } onChanged(); } if (!other.storageTypes_.isEmpty()) { if (storageTypes_.isEmpty()) { storageTypes_ = other.storageTypes_; bitField0_ = (bitField0_ & ~0x00000040); } else { ensureStorageTypesIsMutable(); storageTypes_.addAll(other.storageTypes_); } onChanged(); } if (!other.storageIDs_.isEmpty()) { if (storageIDs_.isEmpty()) { storageIDs_ = other.storageIDs_; bitField0_ = (bitField0_ & ~0x00000080); } else { ensureStorageIDsIsMutable(); storageIDs_.addAll(other.storageIDs_); } onChanged(); } if (other.hasBlockIndices()) { setBlockIndices(other.getBlockIndices()); } if (blockTokensBuilder_ == null) { if (!other.blockTokens_.isEmpty()) { if (blockTokens_.isEmpty()) { blockTokens_ = other.blockTokens_; bitField0_ = (bitField0_ & ~0x00000200); } else { ensureBlockTokensIsMutable(); blockTokens_.addAll(other.blockTokens_); } onChanged(); } } else { if (!other.blockTokens_.isEmpty()) { if (blockTokensBuilder_.isEmpty()) { blockTokensBuilder_.dispose(); blockTokensBuilder_ = null; blockTokens_ = other.blockTokens_; bitField0_ = (bitField0_ & ~0x00000200); blockTokensBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getBlockTokensFieldBuilder() : null; } else { blockTokensBuilder_.addAllMessages(other.blockTokens_); } } } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasB()) { return false; } if (!hasOffset()) { return false; } if (!hasCorrupt()) { return false; } if (!hasBlockToken()) { return false; } if (!getB().isInitialized()) { return false; } for (int i = 0; i < getLocsCount(); i++) { if (!getLocs(i).isInitialized()) { return false; } } if (!getBlockToken().isInitialized()) { return false; } for (int i = 0; i < getBlockTokensCount(); i++) { if (!getBlockTokens(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { input.readMessage( getBFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000001; break; } // case 10 case 16: { offset_ = input.readUInt64(); bitField0_ |= 0x00000002; break; } // case 16 case 26: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry); if (locsBuilder_ == null) { ensureLocsIsMutable(); locs_.add(m); } else { locsBuilder_.addMessage(m); } break; } // case 26 case 32: { corrupt_ = input.readBool(); bitField0_ |= 0x00000008; break; } // case 32 case 42: { input.readMessage( getBlockTokenFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000010; break; } // case 42 case 48: { boolean v = input.readBool(); ensureIsCachedIsMutable(); 
isCached_.addBoolean(v); break; } // case 48 case 50: { int length = input.readRawVarint32(); int limit = input.pushLimit(length); ensureIsCachedIsMutable(); while (input.getBytesUntilLimit() > 0) { isCached_.addBoolean(input.readBool()); } input.popLimit(limit); break; } // case 50 case 56: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(7, tmpRaw); } else { ensureStorageTypesIsMutable(); storageTypes_.add(tmpRaw); } break; } // case 56 case 58: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(7, tmpRaw); } else { ensureStorageTypesIsMutable(); storageTypes_.add(tmpRaw); } } input.popLimit(oldLimit); break; } // case 58 case 66: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); ensureStorageIDsIsMutable(); storageIDs_.add(bs); break; } // case 66 case 74: { blockIndices_ = input.readBytes(); bitField0_ |= 0x00000100; break; } // case 74 case 82: { org.apache.hadoop.security.proto.SecurityProtos.TokenProto m = input.readMessage( org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry); if (blockTokensBuilder_ == null) { ensureBlockTokensIsMutable(); blockTokens_.add(m); } else { blockTokensBuilder_.addMessage(m); } break; } // case 82 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> bBuilder_; /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; * @return Whether the b field is set. */ public boolean hasB() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; * @return The b. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() { if (bBuilder_ == null) { return b_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : b_; } else { return bBuilder_.getMessage(); } } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder setB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (bBuilder_ == null) { if (value == null) { throw new NullPointerException(); } b_ = value; } else { bBuilder_.setMessage(value); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder setB( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { if (bBuilder_ == null) { b_ = builderForValue.build(); } else { bBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder mergeB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (bBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && b_ != null && b_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { getBBuilder().mergeFrom(value); } else { b_ = value; } } else { bBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder clearB() { bitField0_ = (bitField0_ & ~0x00000001); b_ = null; if (bBuilder_ != null) { bBuilder_.dispose(); bBuilder_ = null; } onChanged(); return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBBuilder() { bitField0_ |= 0x00000001; onChanged(); return getBFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() { if (bBuilder_ != null) { return bBuilder_.getMessageOrBuilder(); } else { return b_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : b_; } } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> getBFieldBuilder() { if (bBuilder_ == null) { bBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( getB(), getParentForChildren(), isClean()); b_ = null; } return bBuilder_; } private long offset_ ; /** *
       * offset of first byte of block in the file
       * 
* * required uint64 offset = 2; * @return Whether the offset field is set. */ @java.lang.Override public boolean hasOffset() { return ((bitField0_ & 0x00000002) != 0); } /** *
       * offset of first byte of block in the file
       * 
* * required uint64 offset = 2; * @return The offset. */ @java.lang.Override public long getOffset() { return offset_; } /** *
       * offset of first byte of block in the file
       * 
* * required uint64 offset = 2; * @param value The offset to set. * @return This builder for chaining. */ public Builder setOffset(long value) { offset_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** *
       * offset of first byte of block in the file
       * 
* * required uint64 offset = 2; * @return This builder for chaining. */ public Builder clearOffset() { bitField0_ = (bitField0_ & ~0x00000002); offset_ = 0L; onChanged(); return this; } private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> locs_ = java.util.Collections.emptyList(); private void ensureLocsIsMutable() { if (!((bitField0_ & 0x00000004) != 0)) { locs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(locs_); bitField0_ |= 0x00000004; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> locsBuilder_; /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getLocsList() { if (locsBuilder_ == null) { return java.util.Collections.unmodifiableList(locs_); } else { return locsBuilder_.getMessageList(); } } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public int getLocsCount() { if (locsBuilder_ == null) { return locs_.size(); } else { return locsBuilder_.getCount(); } } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) { if (locsBuilder_ == null) { return locs_.get(index); } else { return locsBuilder_.getMessage(index); } } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder setLocs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (locsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocsIsMutable(); locs_.set(index, value); onChanged(); } else { locsBuilder_.setMessage(index, value); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder setLocs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (locsBuilder_ == null) { ensureLocsIsMutable(); locs_.set(index, builderForValue.build()); onChanged(); } else { locsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder addLocs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (locsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocsIsMutable(); locs_.add(value); onChanged(); } else { locsBuilder_.addMessage(value); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder addLocs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (locsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocsIsMutable(); locs_.add(index, value); onChanged(); } else { locsBuilder_.addMessage(index, value); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder addLocs( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (locsBuilder_ == null) { ensureLocsIsMutable(); locs_.add(builderForValue.build()); onChanged(); } else { locsBuilder_.addMessage(builderForValue.build()); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder addLocs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (locsBuilder_ == null) { ensureLocsIsMutable(); locs_.add(index, builderForValue.build()); onChanged(); } else { locsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder addAllLocs( java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) { if (locsBuilder_ == null) { ensureLocsIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, locs_); onChanged(); } else { locsBuilder_.addAllMessages(values); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder clearLocs() { if (locsBuilder_ == null) { locs_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); } else { locsBuilder_.clear(); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder removeLocs(int index) { if (locsBuilder_ == null) { ensureLocsIsMutable(); locs_.remove(index); onChanged(); } else { locsBuilder_.remove(index); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getLocsBuilder( int index) { return getLocsFieldBuilder().getBuilder(index); } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder( int index) { if (locsBuilder_ == null) { return locs_.get(index); } else { return locsBuilder_.getMessageOrBuilder(index); } } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getLocsOrBuilderList() { if (locsBuilder_ != null) { return locsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(locs_); } } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder() { return getLocsFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder( int index) { return getLocsFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public java.util.List getLocsBuilderList() { return getLocsFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getLocsFieldBuilder() { if (locsBuilder_ == null) { locsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( locs_, ((bitField0_ & 0x00000004) != 0), getParentForChildren(), isClean()); locs_ = null; } return locsBuilder_; } private boolean corrupt_ ; /** *
       * true if all replicas of a block are corrupt, else false
       * 
* * required bool corrupt = 4; * @return Whether the corrupt field is set. */ @java.lang.Override public boolean hasCorrupt() { return ((bitField0_ & 0x00000008) != 0); } /** *
       * true if all replicas of a block are corrupt, else false
       * 
* * required bool corrupt = 4; * @return The corrupt. */ @java.lang.Override public boolean getCorrupt() { return corrupt_; } /** *
       * true if all replicas of a block are corrupt, else false
       * 
* * required bool corrupt = 4; * @param value The corrupt to set. * @return This builder for chaining. */ public Builder setCorrupt(boolean value) { corrupt_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** *
       * true if all replicas of a block are corrupt, else false
       * 
* * required bool corrupt = 4; * @return This builder for chaining. */ public Builder clearCorrupt() { bitField0_ = (bitField0_ & ~0x00000008); corrupt_ = false; onChanged(); return this; } private org.apache.hadoop.security.proto.SecurityProtos.TokenProto blockToken_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> blockTokenBuilder_; /** * required .hadoop.common.TokenProto blockToken = 5; * @return Whether the blockToken field is set. */ public boolean hasBlockToken() { return ((bitField0_ & 0x00000010) != 0); } /** * required .hadoop.common.TokenProto blockToken = 5; * @return The blockToken. */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken() { if (blockTokenBuilder_ == null) { return blockToken_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : blockToken_; } else { return blockTokenBuilder_.getMessage(); } } /** * required .hadoop.common.TokenProto blockToken = 5; */ public Builder setBlockToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) { if (blockTokenBuilder_ == null) { if (value == null) { throw new NullPointerException(); } blockToken_ = value; } else { blockTokenBuilder_.setMessage(value); } bitField0_ |= 0x00000010; onChanged(); return this; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public Builder setBlockToken( org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) { if (blockTokenBuilder_ == null) { blockToken_ = builderForValue.build(); } else { blockTokenBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000010; onChanged(); return this; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public Builder mergeBlockToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) { if (blockTokenBuilder_ == null) { if (((bitField0_ & 0x00000010) != 0) && blockToken_ != null && blockToken_ != org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()) { getBlockTokenBuilder().mergeFrom(value); } else { blockToken_ = value; } } else { blockTokenBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; onChanged(); return this; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public Builder clearBlockToken() { bitField0_ = (bitField0_ & ~0x00000010); blockToken_ = null; if (blockTokenBuilder_ != null) { blockTokenBuilder_.dispose(); blockTokenBuilder_ = null; } onChanged(); return this; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getBlockTokenBuilder() { bitField0_ |= 0x00000010; onChanged(); return getBlockTokenFieldBuilder().getBuilder(); } /** * required .hadoop.common.TokenProto blockToken = 5; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder() { if (blockTokenBuilder_ != null) { return blockTokenBuilder_.getMessageOrBuilder(); } else { return blockToken_ == null ? 
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : blockToken_; } } /** * required .hadoop.common.TokenProto blockToken = 5; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> getBlockTokenFieldBuilder() { if (blockTokenBuilder_ == null) { blockTokenBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>( getBlockToken(), getParentForChildren(), isClean()); blockToken_ = null; } return blockTokenBuilder_; } private org.apache.hadoop.thirdparty.protobuf.Internal.BooleanList isCached_ = emptyBooleanList(); private void ensureIsCachedIsMutable() { if (!((bitField0_ & 0x00000020) != 0)) { isCached_ = mutableCopy(isCached_); bitField0_ |= 0x00000020; } } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; * @return A list containing the isCached. */ public java.util.List<java.lang.Boolean> getIsCachedList() { return ((bitField0_ & 0x00000020) != 0) ? java.util.Collections.unmodifiableList(isCached_) : isCached_; } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; * @return The count of isCached. */ public int getIsCachedCount() { return isCached_.size(); } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; * @param index The index of the element to return. * @return The isCached at the given index. */ public boolean getIsCached(int index) { return isCached_.getBoolean(index); } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; * @param index The index to set the value at. * @param value The isCached to set. * @return This builder for chaining. */ public Builder setIsCached( int index, boolean value) { ensureIsCachedIsMutable(); isCached_.setBoolean(index, value); onChanged(); return this; } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; * @param value The isCached to add. * @return This builder for chaining. */ public Builder addIsCached(boolean value) { ensureIsCachedIsMutable(); isCached_.addBoolean(value); onChanged(); return this; } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; * @param values The isCached to add. * @return This builder for chaining. */ public Builder addAllIsCached( java.lang.Iterable<? extends java.lang.Boolean> values) { ensureIsCachedIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, isCached_); onChanged(); return this; } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; * @return This builder for chaining. */ public Builder clearIsCached() { isCached_ = emptyBooleanList(); bitField0_ = (bitField0_ & ~0x00000020); onChanged(); return this; } private java.util.List storageTypes_ = java.util.Collections.emptyList(); private void ensureStorageTypesIsMutable() { if (!((bitField0_ & 0x00000040) != 0)) { storageTypes_ = new java.util.ArrayList(storageTypes_); bitField0_ |= 0x00000040; } } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return A list containing the storageTypes. */ public java.util.List getStorageTypesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return The count of storageTypes. */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @param index The index of the element to return. * @return The storageTypes at the given index. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_converter_.convert(storageTypes_.get(index)); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @param index The index to set the value at. * @param value The storageTypes to set. * @return This builder for chaining. */ public Builder setStorageTypes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.set(index, value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @param value The storageTypes to add. * @return This builder for chaining. */ public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.add(value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @param values The storageTypes to add. * @return This builder for chaining. */ public Builder addAllStorageTypes( java.lang.Iterable values) { ensureStorageTypesIsMutable(); for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) { storageTypes_.add(value.getNumber()); } onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return This builder for chaining. */ public Builder clearStorageTypes() { storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000040); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.LazyStringList storageIDs_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; private void ensureStorageIDsIsMutable() { if (!((bitField0_ & 0x00000080) != 0)) { storageIDs_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(storageIDs_); bitField0_ |= 0x00000080; } } /** * repeated string storageIDs = 8; * @return A list containing the storageIDs. */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getStorageIDsList() { return storageIDs_.getUnmodifiableView(); } /** * repeated string storageIDs = 8; * @return The count of storageIDs. 
*/ public int getStorageIDsCount() { return storageIDs_.size(); } /** * repeated string storageIDs = 8; * @param index The index of the element to return. * @return The storageIDs at the given index. */ public java.lang.String getStorageIDs(int index) { return storageIDs_.get(index); } /** * repeated string storageIDs = 8; * @param index The index of the value to return. * @return The bytes of the storageIDs at the given index. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageIDsBytes(int index) { return storageIDs_.getByteString(index); } /** * repeated string storageIDs = 8; * @param index The index to set the value at. * @param value The storageIDs to set. * @return This builder for chaining. */ public Builder setStorageIDs( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageIDsIsMutable(); storageIDs_.set(index, value); onChanged(); return this; } /** * repeated string storageIDs = 8; * @param value The storageIDs to add. * @return This builder for chaining. */ public Builder addStorageIDs( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageIDsIsMutable(); storageIDs_.add(value); onChanged(); return this; } /** * repeated string storageIDs = 8; * @param values The storageIDs to add. * @return This builder for chaining. */ public Builder addAllStorageIDs( java.lang.Iterable values) { ensureStorageIDsIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, storageIDs_); onChanged(); return this; } /** * repeated string storageIDs = 8; * @return This builder for chaining. */ public Builder clearStorageIDs() { storageIDs_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000080); onChanged(); return this; } /** * repeated string storageIDs = 8; * @param value The bytes of the storageIDs to add. * @return This builder for chaining. */ public Builder addStorageIDsBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureStorageIDsIsMutable(); storageIDs_.add(value); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString blockIndices_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** *
       * striped block related fields
       * 
* * optional bytes blockIndices = 9; * @return Whether the blockIndices field is set. */ @java.lang.Override public boolean hasBlockIndices() { return ((bitField0_ & 0x00000100) != 0); } /** *
       * striped block related fields
       * 
* * optional bytes blockIndices = 9; * @return The blockIndices. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockIndices() { return blockIndices_; } /** *
       * striped block related fields
       * 
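        *
        * Sketch (assumption: for striped, erasure-coded blocks HDFS encodes one byte
        * per storage location, giving that location's index within the block group):
        *
        *   byte[] indices = {0, 1, 2};   // illustrative block-group indices
        *   builder.setBlockIndices(
        *       org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(indices));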
* * optional bytes blockIndices = 9; * @param value The blockIndices to set. * @return This builder for chaining. */ public Builder setBlockIndices(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } blockIndices_ = value; bitField0_ |= 0x00000100; onChanged(); return this; } /** *
       * striped block related fields
       * 
* * optional bytes blockIndices = 9; * @return This builder for chaining. */ public Builder clearBlockIndices() { bitField0_ = (bitField0_ & ~0x00000100); blockIndices_ = getDefaultInstance().getBlockIndices(); onChanged(); return this; } private java.util.List blockTokens_ = java.util.Collections.emptyList(); private void ensureBlockTokensIsMutable() { if (!((bitField0_ & 0x00000200) != 0)) { blockTokens_ = new java.util.ArrayList(blockTokens_); bitField0_ |= 0x00000200; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> blockTokensBuilder_; /** *
       * each internal block has a block token
       * 
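        *
        * Note: the Builder stores blockTokens either as a plain java.util.List or,
        * once getBlockTokensFieldBuilder() has been used, inside a
        * RepeatedFieldBuilderV3. In both cases the list returned here is a read-only
        * view; mutate through the add/set/remove methods instead, e.g.
        *
        *   java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> tokens =
        *       builder.getBlockTokensList();   // do not modify this list directly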
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public java.util.List getBlockTokensList() { if (blockTokensBuilder_ == null) { return java.util.Collections.unmodifiableList(blockTokens_); } else { return blockTokensBuilder_.getMessageList(); } } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public int getBlockTokensCount() { if (blockTokensBuilder_ == null) { return blockTokens_.size(); } else { return blockTokensBuilder_.getCount(); } } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) { if (blockTokensBuilder_ == null) { return blockTokens_.get(index); } else { return blockTokensBuilder_.getMessage(index); } } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder setBlockTokens( int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) { if (blockTokensBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlockTokensIsMutable(); blockTokens_.set(index, value); onChanged(); } else { blockTokensBuilder_.setMessage(index, value); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder setBlockTokens( int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) { if (blockTokensBuilder_ == null) { ensureBlockTokensIsMutable(); blockTokens_.set(index, builderForValue.build()); onChanged(); } else { blockTokensBuilder_.setMessage(index, builderForValue.build()); } return this; } /** *
       * each internal block has a block token
       * 
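        *
        * Sketch of adding one per-internal-block token (values illustrative; assumes
        * the usual hadoop.common.TokenProto required fields identifier, password,
        * kind and service):
        *
        *   builder.addBlockTokens(
        *       org.apache.hadoop.security.proto.SecurityProtos.TokenProto.newBuilder()
        *           .setIdentifier(org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY)
        *           .setPassword(org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY)
        *           .setKind("HDFS_BLOCK_TOKEN")
        *           .setService("")
        *           .build());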
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder addBlockTokens(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) { if (blockTokensBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlockTokensIsMutable(); blockTokens_.add(value); onChanged(); } else { blockTokensBuilder_.addMessage(value); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder addBlockTokens( int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) { if (blockTokensBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlockTokensIsMutable(); blockTokens_.add(index, value); onChanged(); } else { blockTokensBuilder_.addMessage(index, value); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder addBlockTokens( org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) { if (blockTokensBuilder_ == null) { ensureBlockTokensIsMutable(); blockTokens_.add(builderForValue.build()); onChanged(); } else { blockTokensBuilder_.addMessage(builderForValue.build()); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder addBlockTokens( int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) { if (blockTokensBuilder_ == null) { ensureBlockTokensIsMutable(); blockTokens_.add(index, builderForValue.build()); onChanged(); } else { blockTokensBuilder_.addMessage(index, builderForValue.build()); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder addAllBlockTokens( java.lang.Iterable values) { if (blockTokensBuilder_ == null) { ensureBlockTokensIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, blockTokens_); onChanged(); } else { blockTokensBuilder_.addAllMessages(values); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder clearBlockTokens() { if (blockTokensBuilder_ == null) { blockTokens_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000200); onChanged(); } else { blockTokensBuilder_.clear(); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder removeBlockTokens(int index) { if (blockTokensBuilder_ == null) { ensureBlockTokensIsMutable(); blockTokens_.remove(index); onChanged(); } else { blockTokensBuilder_.remove(index); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getBlockTokensBuilder( int index) { return getBlockTokensFieldBuilder().getBuilder(index); } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder( int index) { if (blockTokensBuilder_ == null) { return blockTokens_.get(index); } else { return blockTokensBuilder_.getMessageOrBuilder(index); } } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public java.util.List getBlockTokensOrBuilderList() { if (blockTokensBuilder_ != null) { return blockTokensBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(blockTokens_); } } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder() { return getBlockTokensFieldBuilder().addBuilder( org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()); } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder( int index) { return getBlockTokensFieldBuilder().addBuilder( index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()); } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public java.util.List getBlockTokensBuilderList() { return getBlockTokensFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> getBlockTokensFieldBuilder() { if (blockTokensBuilder_ == null) { blockTokensBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>( blockTokens_, ((bitField0_ & 0x00000200) != 0), getParentForChildren(), isClean()); blockTokens_ = null; } return blockTokensBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.LocatedBlockProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.LocatedBlockProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public LocatedBlockProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface BatchedListingKeyProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BatchedListingKeyProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required bytes checksum = 1; * @return Whether the checksum field is set. */ boolean hasChecksum(); /** * required bytes checksum = 1; * @return The checksum. 
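     *
     * Construction sketch (values illustrative; checksum, pathIndex and startAfter
     * are all required, so build() throws if any of them is left unset):
     *
     *   HdfsProtos.BatchedListingKeyProto key = HdfsProtos.BatchedListingKeyProto.newBuilder()
     *       .setChecksum(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("example"))
     *       .setPathIndex(0)
     *       .setStartAfter(org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY)
     *       .build();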
*/ org.apache.hadoop.thirdparty.protobuf.ByteString getChecksum(); /** * required uint32 pathIndex = 2; * @return Whether the pathIndex field is set. */ boolean hasPathIndex(); /** * required uint32 pathIndex = 2; * @return The pathIndex. */ int getPathIndex(); /** * required bytes startAfter = 3; * @return Whether the startAfter field is set. */ boolean hasStartAfter(); /** * required bytes startAfter = 3; * @return The startAfter. */ org.apache.hadoop.thirdparty.protobuf.ByteString getStartAfter(); } /** * Protobuf type {@code hadoop.hdfs.BatchedListingKeyProto} */ public static final class BatchedListingKeyProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.BatchedListingKeyProto) BatchedListingKeyProtoOrBuilder { private static final long serialVersionUID = 0L; // Use BatchedListingKeyProto.newBuilder() to construct. private BatchedListingKeyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private BatchedListingKeyProto() { checksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; startAfter_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new BatchedListingKeyProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.Builder.class); } private int bitField0_; public static final int CHECKSUM_FIELD_NUMBER = 1; private org.apache.hadoop.thirdparty.protobuf.ByteString checksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes checksum = 1; * @return Whether the checksum field is set. */ @java.lang.Override public boolean hasChecksum() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes checksum = 1; * @return The checksum. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getChecksum() { return checksum_; } public static final int PATHINDEX_FIELD_NUMBER = 2; private int pathIndex_ = 0; /** * required uint32 pathIndex = 2; * @return Whether the pathIndex field is set. */ @java.lang.Override public boolean hasPathIndex() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 pathIndex = 2; * @return The pathIndex. */ @java.lang.Override public int getPathIndex() { return pathIndex_; } public static final int STARTAFTER_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString startAfter_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes startAfter = 3; * @return Whether the startAfter field is set. 
*/ @java.lang.Override public boolean hasStartAfter() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes startAfter = 3; * @return The startAfter. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getStartAfter() { return startAfter_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasChecksum()) { memoizedIsInitialized = 0; return false; } if (!hasPathIndex()) { memoizedIsInitialized = 0; return false; } if (!hasStartAfter()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeBytes(1, checksum_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, pathIndex_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBytes(3, startAfter_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(1, checksum_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, pathIndex_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, startAfter_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto) obj; if (hasChecksum() != other.hasChecksum()) return false; if (hasChecksum()) { if (!getChecksum() .equals(other.getChecksum())) return false; } if (hasPathIndex() != other.hasPathIndex()) return false; if (hasPathIndex()) { if (getPathIndex() != other.getPathIndex()) return false; } if (hasStartAfter() != other.hasStartAfter()) return false; if (hasStartAfter()) { if (!getStartAfter() .equals(other.getStartAfter())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasChecksum()) { hash = (37 * hash) + CHECKSUM_FIELD_NUMBER; hash = (53 * hash) + getChecksum().hashCode(); } if (hasPathIndex()) { hash = (37 * hash) + PATHINDEX_FIELD_NUMBER; hash = (53 * hash) + getPathIndex(); } if (hasStartAfter()) { hash = (37 * hash) + STARTAFTER_FIELD_NUMBER; hash = (53 * hash) + getStartAfter().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return 
DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.BatchedListingKeyProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BatchedListingKeyProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; checksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; pathIndex_ = 0; startAfter_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.checksum_ = checksum_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.pathIndex_ = pathIndex_; to_bitField0_ |= 
0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.startAfter_ = startAfter_; to_bitField0_ |= 0x00000004; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.getDefaultInstance()) return this; if (other.hasChecksum()) { setChecksum(other.getChecksum()); } if (other.hasPathIndex()) { setPathIndex(other.getPathIndex()); } if (other.hasStartAfter()) { setStartAfter(other.getStartAfter()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasChecksum()) { return false; } if (!hasPathIndex()) { return false; } if (!hasStartAfter()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { checksum_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 16: { pathIndex_ = input.readUInt32(); bitField0_ |= 0x00000002; break; } // case 16 case 26: { startAfter_ = input.readBytes(); bitField0_ |= 0x00000004; break; } // case 26 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.ByteString checksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes checksum = 1; * @return Whether the checksum field is set. 
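        *
        * Round-trip sketch (illustrative; key is assumed to be a fully built
        * BatchedListingKeyProto such as the one sketched earlier):
        *
        *   byte[] wire = key.toByteArray();
        *   HdfsProtos.BatchedListingKeyProto copy =
        *       HdfsProtos.BatchedListingKeyProto.parseFrom(wire);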
*/ @java.lang.Override public boolean hasChecksum() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes checksum = 1; * @return The checksum. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getChecksum() { return checksum_; } /** * required bytes checksum = 1; * @param value The checksum to set. * @return This builder for chaining. */ public Builder setChecksum(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checksum_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required bytes checksum = 1; * @return This builder for chaining. */ public Builder clearChecksum() { bitField0_ = (bitField0_ & ~0x00000001); checksum_ = getDefaultInstance().getChecksum(); onChanged(); return this; } private int pathIndex_ ; /** * required uint32 pathIndex = 2; * @return Whether the pathIndex field is set. */ @java.lang.Override public boolean hasPathIndex() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 pathIndex = 2; * @return The pathIndex. */ @java.lang.Override public int getPathIndex() { return pathIndex_; } /** * required uint32 pathIndex = 2; * @param value The pathIndex to set. * @return This builder for chaining. */ public Builder setPathIndex(int value) { pathIndex_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required uint32 pathIndex = 2; * @return This builder for chaining. */ public Builder clearPathIndex() { bitField0_ = (bitField0_ & ~0x00000002); pathIndex_ = 0; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString startAfter_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes startAfter = 3; * @return Whether the startAfter field is set. */ @java.lang.Override public boolean hasStartAfter() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes startAfter = 3; * @return The startAfter. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getStartAfter() { return startAfter_; } /** * required bytes startAfter = 3; * @param value The startAfter to set. * @return This builder for chaining. */ public Builder setStartAfter(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } startAfter_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required bytes startAfter = 3; * @return This builder for chaining. 
*/ public Builder clearStartAfter() { bitField0_ = (bitField0_ & ~0x00000004); startAfter_ = getDefaultInstance().getStartAfter(); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BatchedListingKeyProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BatchedListingKeyProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public BatchedListingKeyProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DataEncryptionKeyProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DataEncryptionKeyProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint32 keyId = 1; * @return Whether the keyId field is set. */ boolean hasKeyId(); /** * required uint32 keyId = 1; * @return The keyId. */ int getKeyId(); /** * required string blockPoolId = 2; * @return Whether the blockPoolId field is set. */ boolean hasBlockPoolId(); /** * required string blockPoolId = 2; * @return The blockPoolId. */ java.lang.String getBlockPoolId(); /** * required string blockPoolId = 2; * @return The bytes for blockPoolId. */ org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes(); /** * required bytes nonce = 3; * @return Whether the nonce field is set. */ boolean hasNonce(); /** * required bytes nonce = 3; * @return The nonce. */ org.apache.hadoop.thirdparty.protobuf.ByteString getNonce(); /** * required bytes encryptionKey = 4; * @return Whether the encryptionKey field is set. 
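     *
     * Construction sketch (values illustrative; keyId, blockPoolId, nonce,
     * encryptionKey and expiryDate are required, encryptionAlgorithm is optional):
     *
     *   HdfsProtos.DataEncryptionKeyProto dek = HdfsProtos.DataEncryptionKeyProto.newBuilder()
     *       .setKeyId(1)
     *       .setBlockPoolId("BP-example")
     *       .setNonce(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[8]))
     *       .setEncryptionKey(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[16]))
     *       .setExpiryDate(System.currentTimeMillis() + 86400000L)
     *       .setEncryptionAlgorithm("AES/CTR/NoPadding")   // optional, value illustrative
     *       .build();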
*/ boolean hasEncryptionKey(); /** * required bytes encryptionKey = 4; * @return The encryptionKey. */ org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionKey(); /** * required uint64 expiryDate = 5; * @return Whether the expiryDate field is set. */ boolean hasExpiryDate(); /** * required uint64 expiryDate = 5; * @return The expiryDate. */ long getExpiryDate(); /** * optional string encryptionAlgorithm = 6; * @return Whether the encryptionAlgorithm field is set. */ boolean hasEncryptionAlgorithm(); /** * optional string encryptionAlgorithm = 6; * @return The encryptionAlgorithm. */ java.lang.String getEncryptionAlgorithm(); /** * optional string encryptionAlgorithm = 6; * @return The bytes for encryptionAlgorithm. */ org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionAlgorithmBytes(); } /** * Protobuf type {@code hadoop.hdfs.DataEncryptionKeyProto} */ public static final class DataEncryptionKeyProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DataEncryptionKeyProto) DataEncryptionKeyProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DataEncryptionKeyProto.newBuilder() to construct. private DataEncryptionKeyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DataEncryptionKeyProto() { blockPoolId_ = ""; nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; encryptionKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; encryptionAlgorithm_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new DataEncryptionKeyProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder.class); } private int bitField0_; public static final int KEYID_FIELD_NUMBER = 1; private int keyId_ = 0; /** * required uint32 keyId = 1; * @return Whether the keyId field is set. */ @java.lang.Override public boolean hasKeyId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint32 keyId = 1; * @return The keyId. */ @java.lang.Override public int getKeyId() { return keyId_; } public static final int BLOCKPOOLID_FIELD_NUMBER = 2; @SuppressWarnings("serial") private volatile java.lang.Object blockPoolId_ = ""; /** * required string blockPoolId = 2; * @return Whether the blockPoolId field is set. */ @java.lang.Override public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000002) != 0); } /** * required string blockPoolId = 2; * @return The blockPoolId. 
*/ @java.lang.Override public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } } /** * required string blockPoolId = 2; * @return The bytes for blockPoolId. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int NONCE_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes nonce = 3; * @return Whether the nonce field is set. */ @java.lang.Override public boolean hasNonce() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes nonce = 3; * @return The nonce. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getNonce() { return nonce_; } public static final int ENCRYPTIONKEY_FIELD_NUMBER = 4; private org.apache.hadoop.thirdparty.protobuf.ByteString encryptionKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes encryptionKey = 4; * @return Whether the encryptionKey field is set. */ @java.lang.Override public boolean hasEncryptionKey() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes encryptionKey = 4; * @return The encryptionKey. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionKey() { return encryptionKey_; } public static final int EXPIRYDATE_FIELD_NUMBER = 5; private long expiryDate_ = 0L; /** * required uint64 expiryDate = 5; * @return Whether the expiryDate field is set. */ @java.lang.Override public boolean hasExpiryDate() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint64 expiryDate = 5; * @return The expiryDate. */ @java.lang.Override public long getExpiryDate() { return expiryDate_; } public static final int ENCRYPTIONALGORITHM_FIELD_NUMBER = 6; @SuppressWarnings("serial") private volatile java.lang.Object encryptionAlgorithm_ = ""; /** * optional string encryptionAlgorithm = 6; * @return Whether the encryptionAlgorithm field is set. */ @java.lang.Override public boolean hasEncryptionAlgorithm() { return ((bitField0_ & 0x00000020) != 0); } /** * optional string encryptionAlgorithm = 6; * @return The encryptionAlgorithm. */ @java.lang.Override public java.lang.String getEncryptionAlgorithm() { java.lang.Object ref = encryptionAlgorithm_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { encryptionAlgorithm_ = s; } return s; } } /** * optional string encryptionAlgorithm = 6; * @return The bytes for encryptionAlgorithm. 
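     *
     * Note: string fields such as blockPoolId and encryptionAlgorithm are held either
     * as a java.lang.String or as a UTF-8 ByteString; the first String accessor decodes
     * the bytes and caches the result when they are valid UTF-8, so both views stay
     * cheap to read afterwards:
     *
     *   String algo = dek.getEncryptionAlgorithm();                 // decoded and cached
     *   org.apache.hadoop.thirdparty.protobuf.ByteString raw =
     *       dek.getEncryptionAlgorithmBytes();                      // raw UTF-8 bytes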
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionAlgorithmBytes() { java.lang.Object ref = encryptionAlgorithm_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); encryptionAlgorithm_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasKeyId()) { memoizedIsInitialized = 0; return false; } if (!hasBlockPoolId()) { memoizedIsInitialized = 0; return false; } if (!hasNonce()) { memoizedIsInitialized = 0; return false; } if (!hasEncryptionKey()) { memoizedIsInitialized = 0; return false; } if (!hasExpiryDate()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, keyId_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, blockPoolId_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBytes(3, nonce_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBytes(4, encryptionKey_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, expiryDate_); } if (((bitField0_ & 0x00000020) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, encryptionAlgorithm_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, keyId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, blockPoolId_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, nonce_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(4, encryptionKey_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, expiryDate_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, encryptionAlgorithm_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) obj; if (hasKeyId() != other.hasKeyId()) return false; if (hasKeyId()) { if (getKeyId() != other.getKeyId()) return false; } if (hasBlockPoolId() != other.hasBlockPoolId()) return false; if (hasBlockPoolId()) { if (!getBlockPoolId() .equals(other.getBlockPoolId())) return false; } if (hasNonce() != other.hasNonce()) 
return false; if (hasNonce()) { if (!getNonce() .equals(other.getNonce())) return false; } if (hasEncryptionKey() != other.hasEncryptionKey()) return false; if (hasEncryptionKey()) { if (!getEncryptionKey() .equals(other.getEncryptionKey())) return false; } if (hasExpiryDate() != other.hasExpiryDate()) return false; if (hasExpiryDate()) { if (getExpiryDate() != other.getExpiryDate()) return false; } if (hasEncryptionAlgorithm() != other.hasEncryptionAlgorithm()) return false; if (hasEncryptionAlgorithm()) { if (!getEncryptionAlgorithm() .equals(other.getEncryptionAlgorithm())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasKeyId()) { hash = (37 * hash) + KEYID_FIELD_NUMBER; hash = (53 * hash) + getKeyId(); } if (hasBlockPoolId()) { hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; hash = (53 * hash) + getBlockPoolId().hashCode(); } if (hasNonce()) { hash = (37 * hash) + NONCE_FIELD_NUMBER; hash = (53 * hash) + getNonce().hashCode(); } if (hasEncryptionKey()) { hash = (37 * hash) + ENCRYPTIONKEY_FIELD_NUMBER; hash = (53 * hash) + getEncryptionKey().hashCode(); } if (hasExpiryDate()) { hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getExpiryDate()); } if (hasEncryptionAlgorithm()) { hash = (37 * hash) + ENCRYPTIONALGORITHM_FIELD_NUMBER; hash = (53 * hash) + getEncryptionAlgorithm().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(java.io.InputStream 
input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DataEncryptionKeyProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DataEncryptionKeyProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; keyId_ = 0; blockPoolId_ = ""; nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; encryptionKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; expiryDate_ = 0L; encryptionAlgorithm_ = ""; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.keyId_ = keyId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.blockPoolId_ = blockPoolId_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.nonce_ = nonce_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.encryptionKey_ = encryptionKey_; to_bitField0_ |= 0x00000008; } if 
(((from_bitField0_ & 0x00000010) != 0)) { result.expiryDate_ = expiryDate_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.encryptionAlgorithm_ = encryptionAlgorithm_; to_bitField0_ |= 0x00000020; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance()) return this; if (other.hasKeyId()) { setKeyId(other.getKeyId()); } if (other.hasBlockPoolId()) { blockPoolId_ = other.blockPoolId_; bitField0_ |= 0x00000002; onChanged(); } if (other.hasNonce()) { setNonce(other.getNonce()); } if (other.hasEncryptionKey()) { setEncryptionKey(other.getEncryptionKey()); } if (other.hasExpiryDate()) { setExpiryDate(other.getExpiryDate()); } if (other.hasEncryptionAlgorithm()) { encryptionAlgorithm_ = other.encryptionAlgorithm_; bitField0_ |= 0x00000020; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasKeyId()) { return false; } if (!hasBlockPoolId()) { return false; } if (!hasNonce()) { return false; } if (!hasEncryptionKey()) { return false; } if (!hasExpiryDate()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { keyId_ = input.readUInt32(); bitField0_ |= 0x00000001; break; } // case 8 case 18: { blockPoolId_ = input.readBytes(); bitField0_ |= 0x00000002; break; } // case 18 case 26: { nonce_ = input.readBytes(); bitField0_ |= 0x00000004; break; } // case 26 case 34: { encryptionKey_ = input.readBytes(); bitField0_ |= 0x00000008; break; } // case 34 case 40: { expiryDate_ = input.readUInt64(); bitField0_ |= 0x00000010; break; } // case 40 case 50: { encryptionAlgorithm_ = 
input.readBytes(); bitField0_ |= 0x00000020; break; } // case 50 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private int keyId_ ; /** * required uint32 keyId = 1; * @return Whether the keyId field is set. */ @java.lang.Override public boolean hasKeyId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint32 keyId = 1; * @return The keyId. */ @java.lang.Override public int getKeyId() { return keyId_; } /** * required uint32 keyId = 1; * @param value The keyId to set. * @return This builder for chaining. */ public Builder setKeyId(int value) { keyId_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required uint32 keyId = 1; * @return This builder for chaining. */ public Builder clearKeyId() { bitField0_ = (bitField0_ & ~0x00000001); keyId_ = 0; onChanged(); return this; } private java.lang.Object blockPoolId_ = ""; /** * required string blockPoolId = 2; * @return Whether the blockPoolId field is set. */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000002) != 0); } /** * required string blockPoolId = 2; * @return The blockPoolId. */ public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string blockPoolId = 2; * @return The bytes for blockPoolId. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string blockPoolId = 2; * @param value The blockPoolId to set. * @return This builder for chaining. */ public Builder setBlockPoolId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } blockPoolId_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required string blockPoolId = 2; * @return This builder for chaining. */ public Builder clearBlockPoolId() { blockPoolId_ = getDefaultInstance().getBlockPoolId(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } /** * required string blockPoolId = 2; * @param value The bytes for blockPoolId to set. * @return This builder for chaining. */ public Builder setBlockPoolIdBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } blockPoolId_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes nonce = 3; * @return Whether the nonce field is set. */ @java.lang.Override public boolean hasNonce() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes nonce = 3; * @return The nonce. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getNonce() { return nonce_; } /** * required bytes nonce = 3; * @param value The nonce to set. * @return This builder for chaining. */ public Builder setNonce(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } nonce_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required bytes nonce = 3; * @return This builder for chaining. */ public Builder clearNonce() { bitField0_ = (bitField0_ & ~0x00000004); nonce_ = getDefaultInstance().getNonce(); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString encryptionKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes encryptionKey = 4; * @return Whether the encryptionKey field is set. */ @java.lang.Override public boolean hasEncryptionKey() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes encryptionKey = 4; * @return The encryptionKey. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionKey() { return encryptionKey_; } /** * required bytes encryptionKey = 4; * @param value The encryptionKey to set. * @return This builder for chaining. */ public Builder setEncryptionKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } encryptionKey_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * required bytes encryptionKey = 4; * @return This builder for chaining. */ public Builder clearEncryptionKey() { bitField0_ = (bitField0_ & ~0x00000008); encryptionKey_ = getDefaultInstance().getEncryptionKey(); onChanged(); return this; } private long expiryDate_ ; /** * required uint64 expiryDate = 5; * @return Whether the expiryDate field is set. */ @java.lang.Override public boolean hasExpiryDate() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint64 expiryDate = 5; * @return The expiryDate. */ @java.lang.Override public long getExpiryDate() { return expiryDate_; } /** * required uint64 expiryDate = 5; * @param value The expiryDate to set. * @return This builder for chaining. */ public Builder setExpiryDate(long value) { expiryDate_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * required uint64 expiryDate = 5; * @return This builder for chaining. */ public Builder clearExpiryDate() { bitField0_ = (bitField0_ & ~0x00000010); expiryDate_ = 0L; onChanged(); return this; } private java.lang.Object encryptionAlgorithm_ = ""; /** * optional string encryptionAlgorithm = 6; * @return Whether the encryptionAlgorithm field is set. */ public boolean hasEncryptionAlgorithm() { return ((bitField0_ & 0x00000020) != 0); } /** * optional string encryptionAlgorithm = 6; * @return The encryptionAlgorithm. */ public java.lang.String getEncryptionAlgorithm() { java.lang.Object ref = encryptionAlgorithm_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { encryptionAlgorithm_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string encryptionAlgorithm = 6; * @return The bytes for encryptionAlgorithm. 
*/ public org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionAlgorithmBytes() { java.lang.Object ref = encryptionAlgorithm_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); encryptionAlgorithm_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string encryptionAlgorithm = 6; * @param value The encryptionAlgorithm to set. * @return This builder for chaining. */ public Builder setEncryptionAlgorithm( java.lang.String value) { if (value == null) { throw new NullPointerException(); } encryptionAlgorithm_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } /** * optional string encryptionAlgorithm = 6; * @return This builder for chaining. */ public Builder clearEncryptionAlgorithm() { encryptionAlgorithm_ = getDefaultInstance().getEncryptionAlgorithm(); bitField0_ = (bitField0_ & ~0x00000020); onChanged(); return this; } /** * optional string encryptionAlgorithm = 6; * @param value The bytes for encryptionAlgorithm to set. * @return This builder for chaining. */ public Builder setEncryptionAlgorithmBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } encryptionAlgorithm_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DataEncryptionKeyProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DataEncryptionKeyProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DataEncryptionKeyProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override 
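// Returns the shared immutable default instance; newBuilder() above derives its builders from this same object.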
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface FileEncryptionInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.FileEncryptionInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return Whether the suite field is set. */ boolean hasSuite(); /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return The suite. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite(); /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return Whether the cryptoProtocolVersion field is set. */ boolean hasCryptoProtocolVersion(); /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return The cryptoProtocolVersion. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion(); /** * required bytes key = 3; * @return Whether the key field is set. */ boolean hasKey(); /** * required bytes key = 3; * @return The key. */ org.apache.hadoop.thirdparty.protobuf.ByteString getKey(); /** * required bytes iv = 4; * @return Whether the iv field is set. */ boolean hasIv(); /** * required bytes iv = 4; * @return The iv. */ org.apache.hadoop.thirdparty.protobuf.ByteString getIv(); /** * required string keyName = 5; * @return Whether the keyName field is set. */ boolean hasKeyName(); /** * required string keyName = 5; * @return The keyName. */ java.lang.String getKeyName(); /** * required string keyName = 5; * @return The bytes for keyName. */ org.apache.hadoop.thirdparty.protobuf.ByteString getKeyNameBytes(); /** * required string ezKeyVersionName = 6; * @return Whether the ezKeyVersionName field is set. */ boolean hasEzKeyVersionName(); /** * required string ezKeyVersionName = 6; * @return The ezKeyVersionName. */ java.lang.String getEzKeyVersionName(); /** * required string ezKeyVersionName = 6; * @return The bytes for ezKeyVersionName. */ org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes(); } /** *
   * <pre>
   **
   * Encryption information for a file.
   * </pre>
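   *
   * A minimal construction sketch, assuming the AES_CTR_NOPADDING and ENCRYPTION_ZONES
   * constants from the CipherSuiteProto and CryptoProtocolVersionProto enums declared
   * elsewhere in this file; dekBytes and ivBytes stand for caller-supplied byte arrays,
   * and ByteString is org.apache.hadoop.thirdparty.protobuf.ByteString:
   * <pre>
   * FileEncryptionInfoProto feInfo = FileEncryptionInfoProto.newBuilder()
   *     .setSuite(CipherSuiteProto.AES_CTR_NOPADDING)
   *     .setCryptoProtocolVersion(CryptoProtocolVersionProto.ENCRYPTION_ZONES)
   *     .setKey(ByteString.copyFrom(dekBytes))       // per-file key material
   *     .setIv(ByteString.copyFrom(ivBytes))         // initialization vector
   *     .setKeyName("ezKey")                         // encryption zone key name (placeholder)
   *     .setEzKeyVersionName("ezKey@0")              // key version name (placeholder)
   *     .build();                                    // throws if any required field is unset
   * </pre>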
* * Protobuf type {@code hadoop.hdfs.FileEncryptionInfoProto} */ public static final class FileEncryptionInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.FileEncryptionInfoProto) FileEncryptionInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use FileEncryptionInfoProto.newBuilder() to construct. private FileEncryptionInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private FileEncryptionInfoProto() { suite_ = 1; cryptoProtocolVersion_ = 1; key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; keyName_ = ""; ezKeyVersionName_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new FileEncryptionInfoProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder.class); } private int bitField0_; public static final int SUITE_FIELD_NUMBER = 1; private int suite_ = 1; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return Whether the suite field is set. */ @java.lang.Override public boolean hasSuite() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return The suite. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(suite_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result; } public static final int CRYPTOPROTOCOLVERSION_FIELD_NUMBER = 2; private int cryptoProtocolVersion_ = 1; /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return Whether the cryptoProtocolVersion field is set. */ @java.lang.Override public boolean hasCryptoProtocolVersion() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return The cryptoProtocolVersion. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.forNumber(cryptoProtocolVersion_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION : result; } public static final int KEY_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes key = 3; * @return Whether the key field is set. */ @java.lang.Override public boolean hasKey() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes key = 3; * @return The key. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() { return key_; } public static final int IV_FIELD_NUMBER = 4; private org.apache.hadoop.thirdparty.protobuf.ByteString iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes iv = 4; * @return Whether the iv field is set. */ @java.lang.Override public boolean hasIv() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes iv = 4; * @return The iv. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getIv() { return iv_; } public static final int KEYNAME_FIELD_NUMBER = 5; @SuppressWarnings("serial") private volatile java.lang.Object keyName_ = ""; /** * required string keyName = 5; * @return Whether the keyName field is set. */ @java.lang.Override public boolean hasKeyName() { return ((bitField0_ & 0x00000010) != 0); } /** * required string keyName = 5; * @return The keyName. */ @java.lang.Override public java.lang.String getKeyName() { java.lang.Object ref = keyName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyName_ = s; } return s; } } /** * required string keyName = 5; * @return The bytes for keyName. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyNameBytes() { java.lang.Object ref = keyName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 6; @SuppressWarnings("serial") private volatile java.lang.Object ezKeyVersionName_ = ""; /** * required string ezKeyVersionName = 6; * @return Whether the ezKeyVersionName field is set. */ @java.lang.Override public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000020) != 0); } /** * required string ezKeyVersionName = 6; * @return The ezKeyVersionName. */ @java.lang.Override public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } } /** * required string ezKeyVersionName = 6; * @return The bytes for ezKeyVersionName. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSuite()) { memoizedIsInitialized = 0; return false; } if (!hasCryptoProtocolVersion()) { memoizedIsInitialized = 0; return false; } if (!hasKey()) { memoizedIsInitialized = 0; return false; } if (!hasIv()) { memoizedIsInitialized = 0; return false; } if (!hasKeyName()) { memoizedIsInitialized = 0; return false; } if (!hasEzKeyVersionName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, suite_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeEnum(2, cryptoProtocolVersion_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBytes(3, key_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBytes(4, iv_); } if (((bitField0_ & 0x00000010) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, keyName_); } if (((bitField0_ & 0x00000020) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, ezKeyVersionName_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, suite_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(2, cryptoProtocolVersion_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, key_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(4, iv_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, keyName_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, ezKeyVersionName_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) obj; if (hasSuite() != other.hasSuite()) return false; if (hasSuite()) { if (suite_ != other.suite_) return false; } if (hasCryptoProtocolVersion() != other.hasCryptoProtocolVersion()) return false; if (hasCryptoProtocolVersion()) { if (cryptoProtocolVersion_ != 
other.cryptoProtocolVersion_) return false; } if (hasKey() != other.hasKey()) return false; if (hasKey()) { if (!getKey() .equals(other.getKey())) return false; } if (hasIv() != other.hasIv()) return false; if (hasIv()) { if (!getIv() .equals(other.getIv())) return false; } if (hasKeyName() != other.hasKeyName()) return false; if (hasKeyName()) { if (!getKeyName() .equals(other.getKeyName())) return false; } if (hasEzKeyVersionName() != other.hasEzKeyVersionName()) return false; if (hasEzKeyVersionName()) { if (!getEzKeyVersionName() .equals(other.getEzKeyVersionName())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSuite()) { hash = (37 * hash) + SUITE_FIELD_NUMBER; hash = (53 * hash) + suite_; } if (hasCryptoProtocolVersion()) { hash = (37 * hash) + CRYPTOPROTOCOLVERSION_FIELD_NUMBER; hash = (53 * hash) + cryptoProtocolVersion_; } if (hasKey()) { hash = (37 * hash) + KEY_FIELD_NUMBER; hash = (53 * hash) + getKey().hashCode(); } if (hasIv()) { hash = (37 * hash) + IV_FIELD_NUMBER; hash = (53 * hash) + getIv().hashCode(); } if (hasKeyName()) { hash = (37 * hash) + KEYNAME_FIELD_NUMBER; hash = (53 * hash) + getKeyName().hashCode(); } if (hasEzKeyVersionName()) { hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER; hash = (53 * hash) + getEzKeyVersionName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     * <pre>
     **
     * Encryption information for a file.
     * </pre>
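     *
     * All six fields of this message are declared required, so build() throws an
     * UninitializedMessageException when any of them is unset; buildPartial() skips
     * that check, and isInitialized() can be used to test the builder beforehand.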
* * Protobuf type {@code hadoop.hdfs.FileEncryptionInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.FileEncryptionInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; suite_ = 1; cryptoProtocolVersion_ = 1; key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; keyName_ = ""; ezKeyVersionName_ = ""; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.suite_ = suite_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.cryptoProtocolVersion_ = cryptoProtocolVersion_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.key_ = key_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.iv_ = iv_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.keyName_ = keyName_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.ezKeyVersionName_ = ezKeyVersionName_; to_bitField0_ |= 0x00000020; } result.bitField0_ |= 
to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) return this; if (other.hasSuite()) { setSuite(other.getSuite()); } if (other.hasCryptoProtocolVersion()) { setCryptoProtocolVersion(other.getCryptoProtocolVersion()); } if (other.hasKey()) { setKey(other.getKey()); } if (other.hasIv()) { setIv(other.getIv()); } if (other.hasKeyName()) { keyName_ = other.keyName_; bitField0_ |= 0x00000010; onChanged(); } if (other.hasEzKeyVersionName()) { ezKeyVersionName_ = other.ezKeyVersionName_; bitField0_ |= 0x00000020; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasSuite()) { return false; } if (!hasCryptoProtocolVersion()) { return false; } if (!hasKey()) { return false; } if (!hasIv()) { return false; } if (!hasKeyName()) { return false; } if (!hasEzKeyVersionName()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(1, tmpRaw); } else { suite_ = tmpRaw; bitField0_ |= 0x00000001; } break; } // case 8 case 16: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(2, tmpRaw); } else { cryptoProtocolVersion_ = tmpRaw; bitField0_ |= 0x00000002; } break; } // case 16 case 26: { 
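// tag 26 = (field number 3 << 3) | wire type 2 (length-delimited): the required "key" bytes field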
key_ = input.readBytes(); bitField0_ |= 0x00000004; break; } // case 26 case 34: { iv_ = input.readBytes(); bitField0_ |= 0x00000008; break; } // case 34 case 42: { keyName_ = input.readBytes(); bitField0_ |= 0x00000010; break; } // case 42 case 50: { ezKeyVersionName_ = input.readBytes(); bitField0_ |= 0x00000020; break; } // case 50 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private int suite_ = 1; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return Whether the suite field is set. */ @java.lang.Override public boolean hasSuite() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return The suite. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(suite_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @param value The suite to set. * @return This builder for chaining. */ public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; suite_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return This builder for chaining. */ public Builder clearSuite() { bitField0_ = (bitField0_ & ~0x00000001); suite_ = 1; onChanged(); return this; } private int cryptoProtocolVersion_ = 1; /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return Whether the cryptoProtocolVersion field is set. */ @java.lang.Override public boolean hasCryptoProtocolVersion() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return The cryptoProtocolVersion. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.forNumber(cryptoProtocolVersion_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION : result; } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @param value The cryptoProtocolVersion to set. * @return This builder for chaining. */ public Builder setCryptoProtocolVersion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; cryptoProtocolVersion_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return This builder for chaining. 
*/ public Builder clearCryptoProtocolVersion() { bitField0_ = (bitField0_ & ~0x00000002); cryptoProtocolVersion_ = 1; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes key = 3; * @return Whether the key field is set. */ @java.lang.Override public boolean hasKey() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes key = 3; * @return The key. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() { return key_; } /** * required bytes key = 3; * @param value The key to set. * @return This builder for chaining. */ public Builder setKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } key_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required bytes key = 3; * @return This builder for chaining. */ public Builder clearKey() { bitField0_ = (bitField0_ & ~0x00000004); key_ = getDefaultInstance().getKey(); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes iv = 4; * @return Whether the iv field is set. */ @java.lang.Override public boolean hasIv() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes iv = 4; * @return The iv. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getIv() { return iv_; } /** * required bytes iv = 4; * @param value The iv to set. * @return This builder for chaining. */ public Builder setIv(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } iv_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * required bytes iv = 4; * @return This builder for chaining. */ public Builder clearIv() { bitField0_ = (bitField0_ & ~0x00000008); iv_ = getDefaultInstance().getIv(); onChanged(); return this; } private java.lang.Object keyName_ = ""; /** * required string keyName = 5; * @return Whether the keyName field is set. */ public boolean hasKeyName() { return ((bitField0_ & 0x00000010) != 0); } /** * required string keyName = 5; * @return The keyName. */ public java.lang.String getKeyName() { java.lang.Object ref = keyName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string keyName = 5; * @return The bytes for keyName. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyNameBytes() { java.lang.Object ref = keyName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string keyName = 5; * @param value The keyName to set. * @return This builder for chaining. */ public Builder setKeyName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } keyName_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * required string keyName = 5; * @return This builder for chaining. 
*/ public Builder clearKeyName() { keyName_ = getDefaultInstance().getKeyName(); bitField0_ = (bitField0_ & ~0x00000010); onChanged(); return this; } /** * required string keyName = 5; * @param value The bytes for keyName to set. * @return This builder for chaining. */ public Builder setKeyNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } keyName_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } private java.lang.Object ezKeyVersionName_ = ""; /** * required string ezKeyVersionName = 6; * @return Whether the ezKeyVersionName field is set. */ public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000020) != 0); } /** * required string ezKeyVersionName = 6; * @return The ezKeyVersionName. */ public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string ezKeyVersionName = 6; * @return The bytes for ezKeyVersionName. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string ezKeyVersionName = 6; * @param value The ezKeyVersionName to set. * @return This builder for chaining. */ public Builder setEzKeyVersionName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ezKeyVersionName_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } /** * required string ezKeyVersionName = 6; * @return This builder for chaining. */ public Builder clearEzKeyVersionName() { ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName(); bitField0_ = (bitField0_ & ~0x00000020); onChanged(); return this; } /** * required string ezKeyVersionName = 6; * @param value The bytes for ezKeyVersionName to set. * @return This builder for chaining. 
*/ public Builder setEzKeyVersionNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ezKeyVersionName_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FileEncryptionInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.FileEncryptionInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public FileEncryptionInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface PerFileEncryptionInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.PerFileEncryptionInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required bytes key = 1; * @return Whether the key field is set. */ boolean hasKey(); /** * required bytes key = 1; * @return The key. */ org.apache.hadoop.thirdparty.protobuf.ByteString getKey(); /** * required bytes iv = 2; * @return Whether the iv field is set. */ boolean hasIv(); /** * required bytes iv = 2; * @return The iv. */ org.apache.hadoop.thirdparty.protobuf.ByteString getIv(); /** * required string ezKeyVersionName = 3; * @return Whether the ezKeyVersionName field is set. */ boolean hasEzKeyVersionName(); /** * required string ezKeyVersionName = 3; * @return The ezKeyVersionName. */ java.lang.String getEzKeyVersionName(); /** * required string ezKeyVersionName = 3; * @return The bytes for ezKeyVersionName. */ org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes(); } /** *
   * <pre>
   **
   * Encryption information for an individual
   * file within an encryption zone
   * </pre>
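   *
   * A minimal sketch of a build / serialize / re-parse round trip; keyBytes and ivBytes
   * stand for caller-supplied byte arrays and ByteString is
   * org.apache.hadoop.thirdparty.protobuf.ByteString:
   * <pre>
   * PerFileEncryptionInfoProto info = PerFileEncryptionInfoProto.newBuilder()
   *     .setKey(ByteString.copyFrom(keyBytes))
   *     .setIv(ByteString.copyFrom(ivBytes))
   *     .setEzKeyVersionName("ezKey@0")               // key version name (placeholder)
   *     .build();
   * byte[] wire = info.toByteArray();
   * PerFileEncryptionInfoProto parsed = PerFileEncryptionInfoProto.parseFrom(wire);
   * </pre>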
* * Protobuf type {@code hadoop.hdfs.PerFileEncryptionInfoProto} */ public static final class PerFileEncryptionInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.PerFileEncryptionInfoProto) PerFileEncryptionInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use PerFileEncryptionInfoProto.newBuilder() to construct. private PerFileEncryptionInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private PerFileEncryptionInfoProto() { key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; ezKeyVersionName_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new PerFileEncryptionInfoProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.Builder.class); } private int bitField0_; public static final int KEY_FIELD_NUMBER = 1; private org.apache.hadoop.thirdparty.protobuf.ByteString key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes key = 1; * @return Whether the key field is set. */ @java.lang.Override public boolean hasKey() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes key = 1; * @return The key. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() { return key_; } public static final int IV_FIELD_NUMBER = 2; private org.apache.hadoop.thirdparty.protobuf.ByteString iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes iv = 2; * @return Whether the iv field is set. */ @java.lang.Override public boolean hasIv() { return ((bitField0_ & 0x00000002) != 0); } /** * required bytes iv = 2; * @return The iv. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getIv() { return iv_; } public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 3; @SuppressWarnings("serial") private volatile java.lang.Object ezKeyVersionName_ = ""; /** * required string ezKeyVersionName = 3; * @return Whether the ezKeyVersionName field is set. */ @java.lang.Override public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000004) != 0); } /** * required string ezKeyVersionName = 3; * @return The ezKeyVersionName. 
*/ @java.lang.Override public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } } /** * required string ezKeyVersionName = 3; * @return The bytes for ezKeyVersionName. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasKey()) { memoizedIsInitialized = 0; return false; } if (!hasIv()) { memoizedIsInitialized = 0; return false; } if (!hasEzKeyVersionName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeBytes(1, key_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBytes(2, iv_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, ezKeyVersionName_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(1, key_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(2, iv_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, ezKeyVersionName_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) obj; if (hasKey() != other.hasKey()) return false; if (hasKey()) { if (!getKey() .equals(other.getKey())) return false; } if (hasIv() != other.hasIv()) return false; if (hasIv()) { if (!getIv() .equals(other.getIv())) return false; } if (hasEzKeyVersionName() != other.hasEzKeyVersionName()) return false; if (hasEzKeyVersionName()) { if (!getEzKeyVersionName() .equals(other.getEzKeyVersionName())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasKey()) { hash = (37 * hash) + 
KEY_FIELD_NUMBER; hash = (53 * hash) + getKey().hashCode(); } if (hasIv()) { hash = (37 * hash) + IV_FIELD_NUMBER; hash = (53 * hash) + getIv().hashCode(); } if (hasEzKeyVersionName()) { hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER; hash = (53 * hash) + getEzKeyVersionName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Encryption information for an individual
     * file within an encryption zone
     * 
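      *
      * A minimal builder sketch, for illustration only: the 16-byte key/IV
      * buffers and the key-version name below are hypothetical placeholders,
      * not real cryptographic material.
      *
      *   PerFileEncryptionInfoProto info = PerFileEncryptionInfoProto.newBuilder()
      *       .setKey(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[16]))
      *       .setIv(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[16]))
      *       .setEzKeyVersionName("myKey@0")
      *       .build(); // build() fails if any of the three required fields is unset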
* * Protobuf type {@code hadoop.hdfs.PerFileEncryptionInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.PerFileEncryptionInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; ezKeyVersionName_ = ""; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.key_ = key_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.iv_ = iv_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.ezKeyVersionName_ = ezKeyVersionName_; to_bitField0_ |= 0x00000004; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.getDefaultInstance()) return this; if (other.hasKey()) { setKey(other.getKey()); } if (other.hasIv()) { setIv(other.getIv()); } if (other.hasEzKeyVersionName()) { ezKeyVersionName_ = other.ezKeyVersionName_; bitField0_ |= 0x00000004; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasKey()) { return false; } if (!hasIv()) { return false; } if (!hasEzKeyVersionName()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { key_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 18: { iv_ = input.readBytes(); bitField0_ |= 0x00000002; break; } // case 18 case 26: { ezKeyVersionName_ = input.readBytes(); bitField0_ |= 0x00000004; break; } // case 26 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.ByteString key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes key = 1; * @return Whether the key field is set. */ @java.lang.Override public boolean hasKey() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes key = 1; * @return The key. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() { return key_; } /** * required bytes key = 1; * @param value The key to set. * @return This builder for chaining. 
*/ public Builder setKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } key_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required bytes key = 1; * @return This builder for chaining. */ public Builder clearKey() { bitField0_ = (bitField0_ & ~0x00000001); key_ = getDefaultInstance().getKey(); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes iv = 2; * @return Whether the iv field is set. */ @java.lang.Override public boolean hasIv() { return ((bitField0_ & 0x00000002) != 0); } /** * required bytes iv = 2; * @return The iv. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getIv() { return iv_; } /** * required bytes iv = 2; * @param value The iv to set. * @return This builder for chaining. */ public Builder setIv(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } iv_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required bytes iv = 2; * @return This builder for chaining. */ public Builder clearIv() { bitField0_ = (bitField0_ & ~0x00000002); iv_ = getDefaultInstance().getIv(); onChanged(); return this; } private java.lang.Object ezKeyVersionName_ = ""; /** * required string ezKeyVersionName = 3; * @return Whether the ezKeyVersionName field is set. */ public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000004) != 0); } /** * required string ezKeyVersionName = 3; * @return The ezKeyVersionName. */ public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string ezKeyVersionName = 3; * @return The bytes for ezKeyVersionName. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string ezKeyVersionName = 3; * @param value The ezKeyVersionName to set. * @return This builder for chaining. */ public Builder setEzKeyVersionName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ezKeyVersionName_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required string ezKeyVersionName = 3; * @return This builder for chaining. */ public Builder clearEzKeyVersionName() { ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } /** * required string ezKeyVersionName = 3; * @param value The bytes for ezKeyVersionName to set. * @return This builder for chaining. 
*/ public Builder setEzKeyVersionNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ezKeyVersionName_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PerFileEncryptionInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.PerFileEncryptionInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public PerFileEncryptionInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ZoneEncryptionInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ZoneEncryptionInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return Whether the suite field is set. */ boolean hasSuite(); /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return The suite. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite(); /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return Whether the cryptoProtocolVersion field is set. */ boolean hasCryptoProtocolVersion(); /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return The cryptoProtocolVersion. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion(); /** * required string keyName = 3; * @return Whether the keyName field is set. 
*/ boolean hasKeyName(); /** * required string keyName = 3; * @return The keyName. */ java.lang.String getKeyName(); /** * required string keyName = 3; * @return The bytes for keyName. */ org.apache.hadoop.thirdparty.protobuf.ByteString getKeyNameBytes(); /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; * @return Whether the reencryptionProto field is set. */ boolean hasReencryptionProto(); /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; * @return The reencryptionProto. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getReencryptionProto(); /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder getReencryptionProtoOrBuilder(); } /** *
   **
   * Encryption information for an encryption
   * zone
   * 
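    *
    * A usage sketch, for illustration only: rawBytes is assumed to hold a
    * previously serialized ZoneEncryptionInfoProto (for example, the value of
    * an encryption-zone xattr); parseFrom declares InvalidProtocolBufferException.
    *
    *   ZoneEncryptionInfoProto zone = ZoneEncryptionInfoProto.parseFrom(rawBytes);
    *   String keyName = zone.getKeyName();
    *   if (zone.hasReencryptionProto()) {
    *     // optional field: present only when re-encryption state was recorded
    *     ReencryptionInfoProto re = zone.getReencryptionProto();
    *   }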
* * Protobuf type {@code hadoop.hdfs.ZoneEncryptionInfoProto} */ public static final class ZoneEncryptionInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ZoneEncryptionInfoProto) ZoneEncryptionInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ZoneEncryptionInfoProto.newBuilder() to construct. private ZoneEncryptionInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ZoneEncryptionInfoProto() { suite_ = 1; cryptoProtocolVersion_ = 1; keyName_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new ZoneEncryptionInfoProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.Builder.class); } private int bitField0_; public static final int SUITE_FIELD_NUMBER = 1; private int suite_ = 1; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return Whether the suite field is set. */ @java.lang.Override public boolean hasSuite() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return The suite. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(suite_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result; } public static final int CRYPTOPROTOCOLVERSION_FIELD_NUMBER = 2; private int cryptoProtocolVersion_ = 1; /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return Whether the cryptoProtocolVersion field is set. */ @java.lang.Override public boolean hasCryptoProtocolVersion() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return The cryptoProtocolVersion. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.forNumber(cryptoProtocolVersion_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION : result; } public static final int KEYNAME_FIELD_NUMBER = 3; @SuppressWarnings("serial") private volatile java.lang.Object keyName_ = ""; /** * required string keyName = 3; * @return Whether the keyName field is set. 
*/ @java.lang.Override public boolean hasKeyName() { return ((bitField0_ & 0x00000004) != 0); } /** * required string keyName = 3; * @return The keyName. */ @java.lang.Override public java.lang.String getKeyName() { java.lang.Object ref = keyName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyName_ = s; } return s; } } /** * required string keyName = 3; * @return The bytes for keyName. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyNameBytes() { java.lang.Object ref = keyName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int REENCRYPTIONPROTO_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto reencryptionProto_; /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; * @return Whether the reencryptionProto field is set. */ @java.lang.Override public boolean hasReencryptionProto() { return ((bitField0_ & 0x00000008) != 0); } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; * @return The reencryptionProto. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getReencryptionProto() { return reencryptionProto_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance() : reencryptionProto_; } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder getReencryptionProtoOrBuilder() { return reencryptionProto_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance() : reencryptionProto_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSuite()) { memoizedIsInitialized = 0; return false; } if (!hasCryptoProtocolVersion()) { memoizedIsInitialized = 0; return false; } if (!hasKeyName()) { memoizedIsInitialized = 0; return false; } if (hasReencryptionProto()) { if (!getReencryptionProto().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, suite_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeEnum(2, cryptoProtocolVersion_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, keyName_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeMessage(4, getReencryptionProto()); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, suite_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(2, cryptoProtocolVersion_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, keyName_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, getReencryptionProto()); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) obj; if (hasSuite() != other.hasSuite()) return false; if (hasSuite()) { if (suite_ != other.suite_) return false; } if (hasCryptoProtocolVersion() != other.hasCryptoProtocolVersion()) return false; if (hasCryptoProtocolVersion()) { if (cryptoProtocolVersion_ != other.cryptoProtocolVersion_) return false; } if (hasKeyName() != other.hasKeyName()) return false; if (hasKeyName()) { if (!getKeyName() .equals(other.getKeyName())) return false; } if (hasReencryptionProto() != other.hasReencryptionProto()) return false; if (hasReencryptionProto()) { if (!getReencryptionProto() .equals(other.getReencryptionProto())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSuite()) { hash = (37 * hash) + SUITE_FIELD_NUMBER; hash = (53 * hash) + suite_; } if (hasCryptoProtocolVersion()) { hash = (37 * hash) + CRYPTOPROTOCOLVERSION_FIELD_NUMBER; hash = (53 * hash) + cryptoProtocolVersion_; } if (hasKeyName()) { 
hash = (37 * hash) + KEYNAME_FIELD_NUMBER; hash = (53 * hash) + getKeyName().hashCode(); } if (hasReencryptionProto()) { hash = (37 * hash) + REENCRYPTIONPROTO_FIELD_NUMBER; hash = (53 * hash) + getReencryptionProto().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Encryption information for an encryption
     * zone
     * 
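      *
      * A minimal builder sketch, for illustration only. It assumes the
      * AES_CTR_NOPADDING and ENCRYPTION_ZONES values of the CipherSuiteProto and
      * CryptoProtocolVersionProto enums declared earlier in this file; "myKey"
      * is a hypothetical key name.
      *
      *   ZoneEncryptionInfoProto zone = ZoneEncryptionInfoProto.newBuilder()
      *       .setSuite(CipherSuiteProto.AES_CTR_NOPADDING)
      *       .setCryptoProtocolVersion(CryptoProtocolVersionProto.ENCRYPTION_ZONES)
      *       .setKeyName("myKey")
      *       .build(); // reencryptionProto is optional and may be left unset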
* * Protobuf type {@code hadoop.hdfs.ZoneEncryptionInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ZoneEncryptionInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getReencryptionProtoFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; suite_ = 1; cryptoProtocolVersion_ = 1; keyName_ = ""; reencryptionProto_ = null; if (reencryptionProtoBuilder_ != null) { reencryptionProtoBuilder_.dispose(); reencryptionProtoBuilder_ = null; } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.suite_ = suite_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.cryptoProtocolVersion_ = cryptoProtocolVersion_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.keyName_ = keyName_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.reencryptionProto_ = 
reencryptionProtoBuilder_ == null ? reencryptionProto_ : reencryptionProtoBuilder_.build(); to_bitField0_ |= 0x00000008; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.getDefaultInstance()) return this; if (other.hasSuite()) { setSuite(other.getSuite()); } if (other.hasCryptoProtocolVersion()) { setCryptoProtocolVersion(other.getCryptoProtocolVersion()); } if (other.hasKeyName()) { keyName_ = other.keyName_; bitField0_ |= 0x00000004; onChanged(); } if (other.hasReencryptionProto()) { mergeReencryptionProto(other.getReencryptionProto()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasSuite()) { return false; } if (!hasCryptoProtocolVersion()) { return false; } if (!hasKeyName()) { return false; } if (hasReencryptionProto()) { if (!getReencryptionProto().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(1, tmpRaw); } else { suite_ = tmpRaw; bitField0_ |= 0x00000001; } break; } // case 8 case 16: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(2, tmpRaw); } else { cryptoProtocolVersion_ = tmpRaw; bitField0_ |= 0x00000002; } break; } // case 16 case 
26: { keyName_ = input.readBytes(); bitField0_ |= 0x00000004; break; } // case 26 case 34: { input.readMessage( getReencryptionProtoFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000008; break; } // case 34 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private int suite_ = 1; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return Whether the suite field is set. */ @java.lang.Override public boolean hasSuite() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return The suite. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(suite_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @param value The suite to set. * @return This builder for chaining. */ public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; suite_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return This builder for chaining. */ public Builder clearSuite() { bitField0_ = (bitField0_ & ~0x00000001); suite_ = 1; onChanged(); return this; } private int cryptoProtocolVersion_ = 1; /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return Whether the cryptoProtocolVersion field is set. */ @java.lang.Override public boolean hasCryptoProtocolVersion() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return The cryptoProtocolVersion. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.forNumber(cryptoProtocolVersion_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION : result; } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @param value The cryptoProtocolVersion to set. * @return This builder for chaining. */ public Builder setCryptoProtocolVersion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; cryptoProtocolVersion_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; * @return This builder for chaining. */ public Builder clearCryptoProtocolVersion() { bitField0_ = (bitField0_ & ~0x00000002); cryptoProtocolVersion_ = 1; onChanged(); return this; } private java.lang.Object keyName_ = ""; /** * required string keyName = 3; * @return Whether the keyName field is set. 
*/ public boolean hasKeyName() { return ((bitField0_ & 0x00000004) != 0); } /** * required string keyName = 3; * @return The keyName. */ public java.lang.String getKeyName() { java.lang.Object ref = keyName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string keyName = 3; * @return The bytes for keyName. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyNameBytes() { java.lang.Object ref = keyName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string keyName = 3; * @param value The keyName to set. * @return This builder for chaining. */ public Builder setKeyName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } keyName_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required string keyName = 3; * @return This builder for chaining. */ public Builder clearKeyName() { keyName_ = getDefaultInstance().getKeyName(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } /** * required string keyName = 3; * @param value The bytes for keyName to set. * @return This builder for chaining. */ public Builder setKeyNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } keyName_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto reencryptionProto_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder> reencryptionProtoBuilder_; /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; * @return Whether the reencryptionProto field is set. */ public boolean hasReencryptionProto() { return ((bitField0_ & 0x00000008) != 0); } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; * @return The reencryptionProto. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getReencryptionProto() { if (reencryptionProtoBuilder_ == null) { return reencryptionProto_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance() : reencryptionProto_; } else { return reencryptionProtoBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public Builder setReencryptionProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto value) { if (reencryptionProtoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } reencryptionProto_ = value; } else { reencryptionProtoBuilder_.setMessage(value); } bitField0_ |= 0x00000008; onChanged(); return this; } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public Builder setReencryptionProto( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder builderForValue) { if (reencryptionProtoBuilder_ == null) { reencryptionProto_ = builderForValue.build(); } else { reencryptionProtoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; onChanged(); return this; } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public Builder mergeReencryptionProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto value) { if (reencryptionProtoBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0) && reencryptionProto_ != null && reencryptionProto_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance()) { getReencryptionProtoBuilder().mergeFrom(value); } else { reencryptionProto_ = value; } } else { reencryptionProtoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; onChanged(); return this; } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public Builder clearReencryptionProto() { bitField0_ = (bitField0_ & ~0x00000008); reencryptionProto_ = null; if (reencryptionProtoBuilder_ != null) { reencryptionProtoBuilder_.dispose(); reencryptionProtoBuilder_ = null; } onChanged(); return this; } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder getReencryptionProtoBuilder() { bitField0_ |= 0x00000008; onChanged(); return getReencryptionProtoFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder getReencryptionProtoOrBuilder() { if (reencryptionProtoBuilder_ != null) { return reencryptionProtoBuilder_.getMessageOrBuilder(); } else { return reencryptionProto_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance() : reencryptionProto_; } } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder> getReencryptionProtoFieldBuilder() { if (reencryptionProtoBuilder_ == null) { reencryptionProtoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder>( getReencryptionProto(), getParentForChildren(), isClean()); reencryptionProto_ = null; } return reencryptionProtoBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ZoneEncryptionInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ZoneEncryptionInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ZoneEncryptionInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ReencryptionInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ReencryptionInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string ezKeyVersionName = 1; * @return 
Whether the ezKeyVersionName field is set. */ boolean hasEzKeyVersionName(); /** * required string ezKeyVersionName = 1; * @return The ezKeyVersionName. */ java.lang.String getEzKeyVersionName(); /** * required string ezKeyVersionName = 1; * @return The bytes for ezKeyVersionName. */ org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes(); /** * required uint64 submissionTime = 2; * @return Whether the submissionTime field is set. */ boolean hasSubmissionTime(); /** * required uint64 submissionTime = 2; * @return The submissionTime. */ long getSubmissionTime(); /** * required bool canceled = 3; * @return Whether the canceled field is set. */ boolean hasCanceled(); /** * required bool canceled = 3; * @return The canceled. */ boolean getCanceled(); /** * required int64 numReencrypted = 4; * @return Whether the numReencrypted field is set. */ boolean hasNumReencrypted(); /** * required int64 numReencrypted = 4; * @return The numReencrypted. */ long getNumReencrypted(); /** * required int64 numFailures = 5; * @return Whether the numFailures field is set. */ boolean hasNumFailures(); /** * required int64 numFailures = 5; * @return The numFailures. */ long getNumFailures(); /** * optional uint64 completionTime = 6; * @return Whether the completionTime field is set. */ boolean hasCompletionTime(); /** * optional uint64 completionTime = 6; * @return The completionTime. */ long getCompletionTime(); /** * optional string lastFile = 7; * @return Whether the lastFile field is set. */ boolean hasLastFile(); /** * optional string lastFile = 7; * @return The lastFile. */ java.lang.String getLastFile(); /** * optional string lastFile = 7; * @return The bytes for lastFile. */ org.apache.hadoop.thirdparty.protobuf.ByteString getLastFileBytes(); } /** *
   **
   * Re-encryption information for an encryption zone
   * 
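    *
    * A construction sketch, for illustration only: the key-version name,
    * submission time and counters below are hypothetical values, and the
    * setters are assumed to be the standard generated Builder setters for the
    * fields listed above.
    *
    *   ReencryptionInfoProto re = ReencryptionInfoProto.newBuilder()
    *       .setEzKeyVersionName("myKey@1")
    *       .setSubmissionTime(System.currentTimeMillis())
    *       .setCanceled(false)
    *       .setNumReencrypted(0L)
    *       .setNumFailures(0L)
    *       .build(); // completionTime and lastFile are optional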
* * Protobuf type {@code hadoop.hdfs.ReencryptionInfoProto} */ public static final class ReencryptionInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ReencryptionInfoProto) ReencryptionInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ReencryptionInfoProto.newBuilder() to construct. private ReencryptionInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ReencryptionInfoProto() { ezKeyVersionName_ = ""; lastFile_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new ReencryptionInfoProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder.class); } private int bitField0_; public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object ezKeyVersionName_ = ""; /** * required string ezKeyVersionName = 1; * @return Whether the ezKeyVersionName field is set. */ @java.lang.Override public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string ezKeyVersionName = 1; * @return The ezKeyVersionName. */ @java.lang.Override public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } } /** * required string ezKeyVersionName = 1; * @return The bytes for ezKeyVersionName. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int SUBMISSIONTIME_FIELD_NUMBER = 2; private long submissionTime_ = 0L; /** * required uint64 submissionTime = 2; * @return Whether the submissionTime field is set. */ @java.lang.Override public boolean hasSubmissionTime() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 submissionTime = 2; * @return The submissionTime. 
*/ @java.lang.Override public long getSubmissionTime() { return submissionTime_; } public static final int CANCELED_FIELD_NUMBER = 3; private boolean canceled_ = false; /** * required bool canceled = 3; * @return Whether the canceled field is set. */ @java.lang.Override public boolean hasCanceled() { return ((bitField0_ & 0x00000004) != 0); } /** * required bool canceled = 3; * @return The canceled. */ @java.lang.Override public boolean getCanceled() { return canceled_; } public static final int NUMREENCRYPTED_FIELD_NUMBER = 4; private long numReencrypted_ = 0L; /** * required int64 numReencrypted = 4; * @return Whether the numReencrypted field is set. */ @java.lang.Override public boolean hasNumReencrypted() { return ((bitField0_ & 0x00000008) != 0); } /** * required int64 numReencrypted = 4; * @return The numReencrypted. */ @java.lang.Override public long getNumReencrypted() { return numReencrypted_; } public static final int NUMFAILURES_FIELD_NUMBER = 5; private long numFailures_ = 0L; /** * required int64 numFailures = 5; * @return Whether the numFailures field is set. */ @java.lang.Override public boolean hasNumFailures() { return ((bitField0_ & 0x00000010) != 0); } /** * required int64 numFailures = 5; * @return The numFailures. */ @java.lang.Override public long getNumFailures() { return numFailures_; } public static final int COMPLETIONTIME_FIELD_NUMBER = 6; private long completionTime_ = 0L; /** * optional uint64 completionTime = 6; * @return Whether the completionTime field is set. */ @java.lang.Override public boolean hasCompletionTime() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 completionTime = 6; * @return The completionTime. */ @java.lang.Override public long getCompletionTime() { return completionTime_; } public static final int LASTFILE_FIELD_NUMBER = 7; @SuppressWarnings("serial") private volatile java.lang.Object lastFile_ = ""; /** * optional string lastFile = 7; * @return Whether the lastFile field is set. */ @java.lang.Override public boolean hasLastFile() { return ((bitField0_ & 0x00000040) != 0); } /** * optional string lastFile = 7; * @return The lastFile. */ @java.lang.Override public java.lang.String getLastFile() { java.lang.Object ref = lastFile_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { lastFile_ = s; } return s; } } /** * optional string lastFile = 7; * @return The bytes for lastFile. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getLastFileBytes() { java.lang.Object ref = lastFile_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); lastFile_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasEzKeyVersionName()) { memoizedIsInitialized = 0; return false; } if (!hasSubmissionTime()) { memoizedIsInitialized = 0; return false; } if (!hasCanceled()) { memoizedIsInitialized = 0; return false; } if (!hasNumReencrypted()) { memoizedIsInitialized = 0; return false; } if (!hasNumFailures()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, ezKeyVersionName_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, submissionTime_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBool(3, canceled_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeInt64(4, numReencrypted_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeInt64(5, numFailures_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt64(6, completionTime_); } if (((bitField0_ & 0x00000040) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 7, lastFile_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, ezKeyVersionName_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, submissionTime_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(3, canceled_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt64Size(4, numReencrypted_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt64Size(5, numFailures_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(6, completionTime_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(7, lastFile_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto) obj; if (hasEzKeyVersionName() != other.hasEzKeyVersionName()) return false; if 
(hasEzKeyVersionName()) { if (!getEzKeyVersionName() .equals(other.getEzKeyVersionName())) return false; } if (hasSubmissionTime() != other.hasSubmissionTime()) return false; if (hasSubmissionTime()) { if (getSubmissionTime() != other.getSubmissionTime()) return false; } if (hasCanceled() != other.hasCanceled()) return false; if (hasCanceled()) { if (getCanceled() != other.getCanceled()) return false; } if (hasNumReencrypted() != other.hasNumReencrypted()) return false; if (hasNumReencrypted()) { if (getNumReencrypted() != other.getNumReencrypted()) return false; } if (hasNumFailures() != other.hasNumFailures()) return false; if (hasNumFailures()) { if (getNumFailures() != other.getNumFailures()) return false; } if (hasCompletionTime() != other.hasCompletionTime()) return false; if (hasCompletionTime()) { if (getCompletionTime() != other.getCompletionTime()) return false; } if (hasLastFile() != other.hasLastFile()) return false; if (hasLastFile()) { if (!getLastFile() .equals(other.getLastFile())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasEzKeyVersionName()) { hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER; hash = (53 * hash) + getEzKeyVersionName().hashCode(); } if (hasSubmissionTime()) { hash = (37 * hash) + SUBMISSIONTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSubmissionTime()); } if (hasCanceled()) { hash = (37 * hash) + CANCELED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getCanceled()); } if (hasNumReencrypted()) { hash = (37 * hash) + NUMREENCRYPTED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNumReencrypted()); } if (hasNumFailures()) { hash = (37 * hash) + NUMFAILURES_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNumFailures()); } if (hasCompletionTime()) { hash = (37 * hash) + COMPLETIONTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getCompletionTime()); } if (hasLastFile()) { hash = (37 * hash) + LASTFILE_FIELD_NUMBER; hash = (53 * hash) + getLastFile().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) 
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Re-encryption information for an encryption zone
     * 
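     *
     * A round-trip sketch, assuming "info" is an already-built ReencryptionInfoProto (as in
     * the construction sketch above): parseFrom(byte[]) checks that the required fields are
     * present, and the optional completionTime is guarded with its has-accessor before use.
     *
     *   byte[] bytes = info.toByteArray();
     *   ReencryptionInfoProto copy = ReencryptionInfoProto.parseFrom(bytes);
     *   long completed = copy.hasCompletionTime() ? copy.getCompletionTime() : -1L;  // -1L is an illustrative sentinel
     *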
* * Protobuf type {@code hadoop.hdfs.ReencryptionInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ReencryptionInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; ezKeyVersionName_ = ""; submissionTime_ = 0L; canceled_ = false; numReencrypted_ = 0L; numFailures_ = 0L; completionTime_ = 0L; lastFile_ = ""; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.ezKeyVersionName_ = ezKeyVersionName_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.submissionTime_ = submissionTime_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.canceled_ = canceled_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.numReencrypted_ = numReencrypted_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.numFailures_ = numFailures_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.completionTime_ = completionTime_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { result.lastFile_ = 
lastFile_; to_bitField0_ |= 0x00000040; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance()) return this; if (other.hasEzKeyVersionName()) { ezKeyVersionName_ = other.ezKeyVersionName_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasSubmissionTime()) { setSubmissionTime(other.getSubmissionTime()); } if (other.hasCanceled()) { setCanceled(other.getCanceled()); } if (other.hasNumReencrypted()) { setNumReencrypted(other.getNumReencrypted()); } if (other.hasNumFailures()) { setNumFailures(other.getNumFailures()); } if (other.hasCompletionTime()) { setCompletionTime(other.getCompletionTime()); } if (other.hasLastFile()) { lastFile_ = other.lastFile_; bitField0_ |= 0x00000040; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasEzKeyVersionName()) { return false; } if (!hasSubmissionTime()) { return false; } if (!hasCanceled()) { return false; } if (!hasNumReencrypted()) { return false; } if (!hasNumFailures()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { ezKeyVersionName_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 16: { submissionTime_ = input.readUInt64(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { canceled_ = input.readBool(); bitField0_ |= 0x00000004; break; } // case 24 case 32: { numReencrypted_ = input.readInt64(); bitField0_ |= 0x00000008; break; } // case 32 case 40: { numFailures_ = input.readInt64(); bitField0_ |= 0x00000010; break; } // case 40 case 48: { completionTime_ = input.readUInt64(); bitField0_ |= 0x00000020; break; } // case 48 
case 58: { lastFile_ = input.readBytes(); bitField0_ |= 0x00000040; break; } // case 58 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object ezKeyVersionName_ = ""; /** * required string ezKeyVersionName = 1; * @return Whether the ezKeyVersionName field is set. */ public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string ezKeyVersionName = 1; * @return The ezKeyVersionName. */ public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string ezKeyVersionName = 1; * @return The bytes for ezKeyVersionName. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string ezKeyVersionName = 1; * @param value The ezKeyVersionName to set. * @return This builder for chaining. */ public Builder setEzKeyVersionName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ezKeyVersionName_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required string ezKeyVersionName = 1; * @return This builder for chaining. */ public Builder clearEzKeyVersionName() { ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * required string ezKeyVersionName = 1; * @param value The bytes for ezKeyVersionName to set. * @return This builder for chaining. */ public Builder setEzKeyVersionNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ezKeyVersionName_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private long submissionTime_ ; /** * required uint64 submissionTime = 2; * @return Whether the submissionTime field is set. */ @java.lang.Override public boolean hasSubmissionTime() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 submissionTime = 2; * @return The submissionTime. */ @java.lang.Override public long getSubmissionTime() { return submissionTime_; } /** * required uint64 submissionTime = 2; * @param value The submissionTime to set. * @return This builder for chaining. */ public Builder setSubmissionTime(long value) { submissionTime_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required uint64 submissionTime = 2; * @return This builder for chaining. */ public Builder clearSubmissionTime() { bitField0_ = (bitField0_ & ~0x00000002); submissionTime_ = 0L; onChanged(); return this; } private boolean canceled_ ; /** * required bool canceled = 3; * @return Whether the canceled field is set. 
*/ @java.lang.Override public boolean hasCanceled() { return ((bitField0_ & 0x00000004) != 0); } /** * required bool canceled = 3; * @return The canceled. */ @java.lang.Override public boolean getCanceled() { return canceled_; } /** * required bool canceled = 3; * @param value The canceled to set. * @return This builder for chaining. */ public Builder setCanceled(boolean value) { canceled_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required bool canceled = 3; * @return This builder for chaining. */ public Builder clearCanceled() { bitField0_ = (bitField0_ & ~0x00000004); canceled_ = false; onChanged(); return this; } private long numReencrypted_ ; /** * required int64 numReencrypted = 4; * @return Whether the numReencrypted field is set. */ @java.lang.Override public boolean hasNumReencrypted() { return ((bitField0_ & 0x00000008) != 0); } /** * required int64 numReencrypted = 4; * @return The numReencrypted. */ @java.lang.Override public long getNumReencrypted() { return numReencrypted_; } /** * required int64 numReencrypted = 4; * @param value The numReencrypted to set. * @return This builder for chaining. */ public Builder setNumReencrypted(long value) { numReencrypted_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * required int64 numReencrypted = 4; * @return This builder for chaining. */ public Builder clearNumReencrypted() { bitField0_ = (bitField0_ & ~0x00000008); numReencrypted_ = 0L; onChanged(); return this; } private long numFailures_ ; /** * required int64 numFailures = 5; * @return Whether the numFailures field is set. */ @java.lang.Override public boolean hasNumFailures() { return ((bitField0_ & 0x00000010) != 0); } /** * required int64 numFailures = 5; * @return The numFailures. */ @java.lang.Override public long getNumFailures() { return numFailures_; } /** * required int64 numFailures = 5; * @param value The numFailures to set. * @return This builder for chaining. */ public Builder setNumFailures(long value) { numFailures_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * required int64 numFailures = 5; * @return This builder for chaining. */ public Builder clearNumFailures() { bitField0_ = (bitField0_ & ~0x00000010); numFailures_ = 0L; onChanged(); return this; } private long completionTime_ ; /** * optional uint64 completionTime = 6; * @return Whether the completionTime field is set. */ @java.lang.Override public boolean hasCompletionTime() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 completionTime = 6; * @return The completionTime. */ @java.lang.Override public long getCompletionTime() { return completionTime_; } /** * optional uint64 completionTime = 6; * @param value The completionTime to set. * @return This builder for chaining. */ public Builder setCompletionTime(long value) { completionTime_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } /** * optional uint64 completionTime = 6; * @return This builder for chaining. */ public Builder clearCompletionTime() { bitField0_ = (bitField0_ & ~0x00000020); completionTime_ = 0L; onChanged(); return this; } private java.lang.Object lastFile_ = ""; /** * optional string lastFile = 7; * @return Whether the lastFile field is set. */ public boolean hasLastFile() { return ((bitField0_ & 0x00000040) != 0); } /** * optional string lastFile = 7; * @return The lastFile. 
*/ public java.lang.String getLastFile() { java.lang.Object ref = lastFile_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { lastFile_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string lastFile = 7; * @return The bytes for lastFile. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getLastFileBytes() { java.lang.Object ref = lastFile_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); lastFile_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string lastFile = 7; * @param value The lastFile to set. * @return This builder for chaining. */ public Builder setLastFile( java.lang.String value) { if (value == null) { throw new NullPointerException(); } lastFile_ = value; bitField0_ |= 0x00000040; onChanged(); return this; } /** * optional string lastFile = 7; * @return This builder for chaining. */ public Builder clearLastFile() { lastFile_ = getDefaultInstance().getLastFile(); bitField0_ = (bitField0_ & ~0x00000040); onChanged(); return this; } /** * optional string lastFile = 7; * @param value The bytes for lastFile to set. * @return This builder for chaining. */ public Builder setLastFileBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } lastFile_ = value; bitField0_ |= 0x00000040; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReencryptionInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ReencryptionInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ReencryptionInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) 
.setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface CipherOptionProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.CipherOptionProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return Whether the suite field is set. */ boolean hasSuite(); /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return The suite. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite(); /** * optional bytes inKey = 2; * @return Whether the inKey field is set. */ boolean hasInKey(); /** * optional bytes inKey = 2; * @return The inKey. */ org.apache.hadoop.thirdparty.protobuf.ByteString getInKey(); /** * optional bytes inIv = 3; * @return Whether the inIv field is set. */ boolean hasInIv(); /** * optional bytes inIv = 3; * @return The inIv. */ org.apache.hadoop.thirdparty.protobuf.ByteString getInIv(); /** * optional bytes outKey = 4; * @return Whether the outKey field is set. */ boolean hasOutKey(); /** * optional bytes outKey = 4; * @return The outKey. */ org.apache.hadoop.thirdparty.protobuf.ByteString getOutKey(); /** * optional bytes outIv = 5; * @return Whether the outIv field is set. */ boolean hasOutIv(); /** * optional bytes outIv = 5; * @return The outIv. */ org.apache.hadoop.thirdparty.protobuf.ByteString getOutIv(); } /** *
   **
   * Cipher option
   * 
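   *
   * A construction sketch: only the cipher suite is required; the key/IV fields are optional
   * bytes carried as ByteString. AES_CTR_NOPADDING is assumed here to be one of the
   * CipherSuiteProto values, and the byte arrays are placeholders.
   *
   *   byte[] key = new byte[16];   // placeholder key material
   *   byte[] iv  = new byte[16];   // placeholder initialization vector
   *   CipherOptionProto option = CipherOptionProto.newBuilder()
   *       .setSuite(CipherSuiteProto.AES_CTR_NOPADDING)
   *       .setInKey(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(key))
   *       .setInIv(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(iv))
   *       .build();
   *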
* * Protobuf type {@code hadoop.hdfs.CipherOptionProto} */ public static final class CipherOptionProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.CipherOptionProto) CipherOptionProtoOrBuilder { private static final long serialVersionUID = 0L; // Use CipherOptionProto.newBuilder() to construct. private CipherOptionProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private CipherOptionProto() { suite_ = 1; inKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; inIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; outKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; outIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new CipherOptionProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder.class); } private int bitField0_; public static final int SUITE_FIELD_NUMBER = 1; private int suite_ = 1; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return Whether the suite field is set. */ @java.lang.Override public boolean hasSuite() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return The suite. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(suite_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result; } public static final int INKEY_FIELD_NUMBER = 2; private org.apache.hadoop.thirdparty.protobuf.ByteString inKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes inKey = 2; * @return Whether the inKey field is set. */ @java.lang.Override public boolean hasInKey() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bytes inKey = 2; * @return The inKey. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getInKey() { return inKey_; } public static final int INIV_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString inIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes inIv = 3; * @return Whether the inIv field is set. */ @java.lang.Override public boolean hasInIv() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bytes inIv = 3; * @return The inIv. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getInIv() { return inIv_; } public static final int OUTKEY_FIELD_NUMBER = 4; private org.apache.hadoop.thirdparty.protobuf.ByteString outKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes outKey = 4; * @return Whether the outKey field is set. */ @java.lang.Override public boolean hasOutKey() { return ((bitField0_ & 0x00000008) != 0); } /** * optional bytes outKey = 4; * @return The outKey. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getOutKey() { return outKey_; } public static final int OUTIV_FIELD_NUMBER = 5; private org.apache.hadoop.thirdparty.protobuf.ByteString outIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes outIv = 5; * @return Whether the outIv field is set. */ @java.lang.Override public boolean hasOutIv() { return ((bitField0_ & 0x00000010) != 0); } /** * optional bytes outIv = 5; * @return The outIv. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getOutIv() { return outIv_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSuite()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, suite_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBytes(2, inKey_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBytes(3, inIv_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBytes(4, outKey_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeBytes(5, outIv_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, suite_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(2, inKey_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, inIv_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(4, outKey_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(5, outIv_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) obj; if (hasSuite() != other.hasSuite()) return false; if (hasSuite()) { if (suite_ != other.suite_) return false; } if (hasInKey() != other.hasInKey()) return false; if (hasInKey()) { if (!getInKey() .equals(other.getInKey())) return false; } if (hasInIv() != other.hasInIv()) return false; if (hasInIv()) { if (!getInIv() 
.equals(other.getInIv())) return false; } if (hasOutKey() != other.hasOutKey()) return false; if (hasOutKey()) { if (!getOutKey() .equals(other.getOutKey())) return false; } if (hasOutIv() != other.hasOutIv()) return false; if (hasOutIv()) { if (!getOutIv() .equals(other.getOutIv())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSuite()) { hash = (37 * hash) + SUITE_FIELD_NUMBER; hash = (53 * hash) + suite_; } if (hasInKey()) { hash = (37 * hash) + INKEY_FIELD_NUMBER; hash = (53 * hash) + getInKey().hashCode(); } if (hasInIv()) { hash = (37 * hash) + INIV_FIELD_NUMBER; hash = (53 * hash) + getInIv().hashCode(); } if (hasOutKey()) { hash = (37 * hash) + OUTKEY_FIELD_NUMBER; hash = (53 * hash) + getOutKey().hashCode(); } if (hasOutIv()) { hash = (37 * hash) + OUTIV_FIELD_NUMBER; hash = (53 * hash) + getOutIv().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto 
parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Cipher option
     * 
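     *
     * A merge sketch with illustrative locals "base" and "override": mergeFrom copies only the
     * fields that are set on the other message, so a partial override can be layered onto a
     * base option. When parsing from the wire, an unrecognized suite number is preserved as an
     * unknown varint field rather than rejected (see the case 8 handling in mergeFrom below).
     *
     *   CipherOptionProto merged = base.toBuilder()
     *       .mergeFrom(override)
     *       .build();
     *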
* * Protobuf type {@code hadoop.hdfs.CipherOptionProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.CipherOptionProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; suite_ = 1; inKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; inIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; outKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; outIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.suite_ = suite_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.inKey_ = inKey_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.inIv_ = inIv_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.outKey_ = outKey_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.outIv_ = outIv_; to_bitField0_ |= 0x00000010; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance()) return this; if (other.hasSuite()) { setSuite(other.getSuite()); } if (other.hasInKey()) { setInKey(other.getInKey()); } if (other.hasInIv()) { setInIv(other.getInIv()); } if (other.hasOutKey()) { setOutKey(other.getOutKey()); } if (other.hasOutIv()) { setOutIv(other.getOutIv()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasSuite()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(1, tmpRaw); } else { suite_ = tmpRaw; bitField0_ |= 0x00000001; } break; } // case 8 case 18: { inKey_ = input.readBytes(); bitField0_ |= 0x00000002; break; } // case 18 case 26: { inIv_ = input.readBytes(); bitField0_ |= 0x00000004; break; } // case 26 case 34: { outKey_ = input.readBytes(); bitField0_ |= 0x00000008; break; } // case 34 case 42: { outIv_ = input.readBytes(); bitField0_ |= 0x00000010; break; } // case 42 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private int suite_ = 1; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return Whether the suite field is set. 
*/ @java.lang.Override public boolean hasSuite() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return The suite. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(suite_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @param value The suite to set. * @return This builder for chaining. */ public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; suite_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; * @return This builder for chaining. */ public Builder clearSuite() { bitField0_ = (bitField0_ & ~0x00000001); suite_ = 1; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString inKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes inKey = 2; * @return Whether the inKey field is set. */ @java.lang.Override public boolean hasInKey() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bytes inKey = 2; * @return The inKey. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getInKey() { return inKey_; } /** * optional bytes inKey = 2; * @param value The inKey to set. * @return This builder for chaining. */ public Builder setInKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } inKey_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * optional bytes inKey = 2; * @return This builder for chaining. */ public Builder clearInKey() { bitField0_ = (bitField0_ & ~0x00000002); inKey_ = getDefaultInstance().getInKey(); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString inIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes inIv = 3; * @return Whether the inIv field is set. */ @java.lang.Override public boolean hasInIv() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bytes inIv = 3; * @return The inIv. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getInIv() { return inIv_; } /** * optional bytes inIv = 3; * @param value The inIv to set. * @return This builder for chaining. */ public Builder setInIv(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } inIv_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * optional bytes inIv = 3; * @return This builder for chaining. */ public Builder clearInIv() { bitField0_ = (bitField0_ & ~0x00000004); inIv_ = getDefaultInstance().getInIv(); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString outKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes outKey = 4; * @return Whether the outKey field is set. */ @java.lang.Override public boolean hasOutKey() { return ((bitField0_ & 0x00000008) != 0); } /** * optional bytes outKey = 4; * @return The outKey. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getOutKey() { return outKey_; } /** * optional bytes outKey = 4; * @param value The outKey to set. * @return This builder for chaining. */ public Builder setOutKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } outKey_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * optional bytes outKey = 4; * @return This builder for chaining. */ public Builder clearOutKey() { bitField0_ = (bitField0_ & ~0x00000008); outKey_ = getDefaultInstance().getOutKey(); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString outIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes outIv = 5; * @return Whether the outIv field is set. */ @java.lang.Override public boolean hasOutIv() { return ((bitField0_ & 0x00000010) != 0); } /** * optional bytes outIv = 5; * @return The outIv. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getOutIv() { return outIv_; } /** * optional bytes outIv = 5; * @param value The outIv to set. * @return This builder for chaining. */ public Builder setOutIv(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } outIv_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional bytes outIv = 5; * @return This builder for chaining. */ public Builder clearOutIv() { bitField0_ = (bitField0_ & ~0x00000010); outIv_ = getDefaultInstance().getOutIv(); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CipherOptionProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CipherOptionProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public CipherOptionProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static 
org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface LocatedBlocksProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.LocatedBlocksProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint64 fileLength = 1; * @return Whether the fileLength field is set. */ boolean hasFileLength(); /** * required uint64 fileLength = 1; * @return The fileLength. */ long getFileLength(); /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ java.util.List getBlocksList(); /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index); /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ int getBlocksCount(); /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ java.util.List getBlocksOrBuilderList(); /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( int index); /** * required bool underConstruction = 3; * @return Whether the underConstruction field is set. */ boolean hasUnderConstruction(); /** * required bool underConstruction = 3; * @return The underConstruction. */ boolean getUnderConstruction(); /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; * @return Whether the lastBlock field is set. */ boolean hasLastBlock(); /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; * @return The lastBlock. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock(); /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder(); /** * required bool isLastBlockComplete = 5; * @return Whether the isLastBlockComplete field is set. */ boolean hasIsLastBlockComplete(); /** * required bool isLastBlockComplete = 5; * @return The isLastBlockComplete. */ boolean getIsLastBlockComplete(); /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; * @return Whether the fileEncryptionInfo field is set. */ boolean hasFileEncryptionInfo(); /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; * @return The fileEncryptionInfo. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo(); /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder(); /** *
     * Optional field for erasure coding
     * 
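      * Illustrative read pattern (not part of the generated file): optional
      * message fields such as ecPolicy should be guarded with the has-accessor,
      * because the getter returns the type's default instance when the field is
      * unset. "locatedBlocks" below is an assumed, already-obtained LocatedBlocksProto:
      *   if (locatedBlocks.hasEcPolicy()) {
      *     HdfsProtos.ErasureCodingPolicyProto ec = locatedBlocks.getEcPolicy();
      *   }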
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; * @return Whether the ecPolicy field is set. */ boolean hasEcPolicy(); /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; * @return The ecPolicy. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy(); /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder(); } /** *
   **
   * A set of file blocks and their locations.
   * 
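    * Illustrative usage sketch (not part of the generated file), assuming a
    * previously built LocatedBlockProto named "blk"; the required fields
    * (fileLength, underConstruction, isLastBlockComplete) must be set before
    * build(), otherwise an uninitialized-message exception is thrown, and
    * parseFrom declares InvalidProtocolBufferException:
    *   HdfsProtos.LocatedBlocksProto lbs = HdfsProtos.LocatedBlocksProto.newBuilder()
    *       .setFileLength(134217728L)          // example value only
    *       .addBlocks(blk)
    *       .setUnderConstruction(false)
    *       .setIsLastBlockComplete(true)
    *       .build();
    *   byte[] wire = lbs.toByteArray();
    *   HdfsProtos.LocatedBlocksProto parsed =
    *       HdfsProtos.LocatedBlocksProto.parseFrom(wire);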
* * Protobuf type {@code hadoop.hdfs.LocatedBlocksProto} */ public static final class LocatedBlocksProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.LocatedBlocksProto) LocatedBlocksProtoOrBuilder { private static final long serialVersionUID = 0L; // Use LocatedBlocksProto.newBuilder() to construct. private LocatedBlocksProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private LocatedBlocksProto() { blocks_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new LocatedBlocksProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class); } private int bitField0_; public static final int FILELENGTH_FIELD_NUMBER = 1; private long fileLength_ = 0L; /** * required uint64 fileLength = 1; * @return Whether the fileLength field is set. */ @java.lang.Override public boolean hasFileLength() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 fileLength = 1; * @return The fileLength. */ @java.lang.Override public long getFileLength() { return fileLength_; } public static final int BLOCKS_FIELD_NUMBER = 2; @SuppressWarnings("serial") private java.util.List blocks_; /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ @java.lang.Override public java.util.List getBlocksList() { return blocks_; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ @java.lang.Override public java.util.List getBlocksOrBuilderList() { return blocks_; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ @java.lang.Override public int getBlocksCount() { return blocks_.size(); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { return blocks_.get(index); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( int index) { return blocks_.get(index); } public static final int UNDERCONSTRUCTION_FIELD_NUMBER = 3; private boolean underConstruction_ = false; /** * required bool underConstruction = 3; * @return Whether the underConstruction field is set. */ @java.lang.Override public boolean hasUnderConstruction() { return ((bitField0_ & 0x00000002) != 0); } /** * required bool underConstruction = 3; * @return The underConstruction. 
*/ @java.lang.Override public boolean getUnderConstruction() { return underConstruction_; } public static final int LASTBLOCK_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_; /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; * @return Whether the lastBlock field is set. */ @java.lang.Override public boolean hasLastBlock() { return ((bitField0_ & 0x00000004) != 0); } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; * @return The lastBlock. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() { return lastBlock_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance() : lastBlock_; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() { return lastBlock_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance() : lastBlock_; } public static final int ISLASTBLOCKCOMPLETE_FIELD_NUMBER = 5; private boolean isLastBlockComplete_ = false; /** * required bool isLastBlockComplete = 5; * @return Whether the isLastBlockComplete field is set. */ @java.lang.Override public boolean hasIsLastBlockComplete() { return ((bitField0_ & 0x00000008) != 0); } /** * required bool isLastBlockComplete = 5; * @return The isLastBlockComplete. */ @java.lang.Override public boolean getIsLastBlockComplete() { return isLastBlockComplete_; } public static final int FILEENCRYPTIONINFO_FIELD_NUMBER = 6; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_; /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; * @return Whether the fileEncryptionInfo field is set. */ @java.lang.Override public boolean hasFileEncryptionInfo() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; * @return The fileEncryptionInfo. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() { return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() { return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } public static final int ECPOLICY_FIELD_NUMBER = 7; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_; /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; * @return Whether the ecPolicy field is set. */ @java.lang.Override public boolean hasEcPolicy() { return ((bitField0_ & 0x00000020) != 0); } /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; * @return The ecPolicy. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasFileLength()) { memoizedIsInitialized = 0; return false; } if (!hasUnderConstruction()) { memoizedIsInitialized = 0; return false; } if (!hasIsLastBlockComplete()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasLastBlock()) { if (!getLastBlock().isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasEcPolicy()) { if (!getEcPolicy().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, fileLength_); } for (int i = 0; i < blocks_.size(); i++) { output.writeMessage(2, blocks_.get(i)); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBool(3, underConstruction_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeMessage(4, getLastBlock()); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBool(5, isLastBlockComplete_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeMessage(6, getFileEncryptionInfo()); } if (((bitField0_ & 0x00000020) != 0)) { output.writeMessage(7, getEcPolicy()); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, fileLength_); } for (int i = 0; i < blocks_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(2, blocks_.get(i)); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(3, underConstruction_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, getLastBlock()); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(5, isLastBlockComplete_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(6, getFileEncryptionInfo()); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(7, getEcPolicy()); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)) { return super.equals(obj); } 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) obj; if (hasFileLength() != other.hasFileLength()) return false; if (hasFileLength()) { if (getFileLength() != other.getFileLength()) return false; } if (!getBlocksList() .equals(other.getBlocksList())) return false; if (hasUnderConstruction() != other.hasUnderConstruction()) return false; if (hasUnderConstruction()) { if (getUnderConstruction() != other.getUnderConstruction()) return false; } if (hasLastBlock() != other.hasLastBlock()) return false; if (hasLastBlock()) { if (!getLastBlock() .equals(other.getLastBlock())) return false; } if (hasIsLastBlockComplete() != other.hasIsLastBlockComplete()) return false; if (hasIsLastBlockComplete()) { if (getIsLastBlockComplete() != other.getIsLastBlockComplete()) return false; } if (hasFileEncryptionInfo() != other.hasFileEncryptionInfo()) return false; if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo() .equals(other.getFileEncryptionInfo())) return false; } if (hasEcPolicy() != other.hasEcPolicy()) return false; if (hasEcPolicy()) { if (!getEcPolicy() .equals(other.getEcPolicy())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasFileLength()) { hash = (37 * hash) + FILELENGTH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFileLength()); } if (getBlocksCount() > 0) { hash = (37 * hash) + BLOCKS_FIELD_NUMBER; hash = (53 * hash) + getBlocksList().hashCode(); } if (hasUnderConstruction()) { hash = (37 * hash) + UNDERCONSTRUCTION_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getUnderConstruction()); } if (hasLastBlock()) { hash = (37 * hash) + LASTBLOCK_FIELD_NUMBER; hash = (53 * hash) + getLastBlock().hashCode(); } if (hasIsLastBlockComplete()) { hash = (37 * hash) + ISLASTBLOCKCOMPLETE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getIsLastBlockComplete()); } if (hasFileEncryptionInfo()) { hash = (37 * hash) + FILEENCRYPTIONINFO_FIELD_NUMBER; hash = (53 * hash) + getFileEncryptionInfo().hashCode(); } if (hasEcPolicy()) { hash = (37 * hash) + ECPOLICY_FIELD_NUMBER; hash = (53 * hash) + getEcPolicy().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * A set of file blocks and their locations.
     * 
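      * Builder usage sketch (illustrative, not generated output): the repeated
      * blocks field exposes add, addAll and clear variants. "blockList" below is
      * an assumed java.util.List of LocatedBlockProto instances:
      *   HdfsProtos.LocatedBlocksProto.Builder b =
      *       HdfsProtos.LocatedBlocksProto.newBuilder();
      *   b.addAllBlocks(blockList);   // appends every element of the list
      *   b.clearBlocks();             // resets the repeated field to empty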
* * Protobuf type {@code hadoop.hdfs.LocatedBlocksProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.LocatedBlocksProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getBlocksFieldBuilder(); getLastBlockFieldBuilder(); getFileEncryptionInfoFieldBuilder(); getEcPolicyFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; fileLength_ = 0L; if (blocksBuilder_ == null) { blocks_ = java.util.Collections.emptyList(); } else { blocks_ = null; blocksBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); underConstruction_ = false; lastBlock_ = null; if (lastBlockBuilder_ != null) { lastBlockBuilder_.dispose(); lastBlockBuilder_ = null; } isLastBlockComplete_ = false; fileEncryptionInfo_ = null; if (fileEncryptionInfoBuilder_ != null) { fileEncryptionInfoBuilder_.dispose(); fileEncryptionInfoBuilder_ = null; } ecPolicy_ = null; if (ecPolicyBuilder_ != null) { ecPolicyBuilder_.dispose(); ecPolicyBuilder_ = null; } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result) { if (blocksBuilder_ == null) { if 
(((bitField0_ & 0x00000002) != 0)) { blocks_ = java.util.Collections.unmodifiableList(blocks_); bitField0_ = (bitField0_ & ~0x00000002); } result.blocks_ = blocks_; } else { result.blocks_ = blocksBuilder_.build(); } } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.fileLength_ = fileLength_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000004) != 0)) { result.underConstruction_ = underConstruction_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000008) != 0)) { result.lastBlock_ = lastBlockBuilder_ == null ? lastBlock_ : lastBlockBuilder_.build(); to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000010) != 0)) { result.isLastBlockComplete_ = isLastBlockComplete_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000020) != 0)) { result.fileEncryptionInfo_ = fileEncryptionInfoBuilder_ == null ? fileEncryptionInfo_ : fileEncryptionInfoBuilder_.build(); to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000040) != 0)) { result.ecPolicy_ = ecPolicyBuilder_ == null ? ecPolicy_ : ecPolicyBuilder_.build(); to_bitField0_ |= 0x00000020; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) return this; if (other.hasFileLength()) { setFileLength(other.getFileLength()); } if (blocksBuilder_ == null) { if (!other.blocks_.isEmpty()) { if (blocks_.isEmpty()) { blocks_ = other.blocks_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureBlocksIsMutable(); blocks_.addAll(other.blocks_); } onChanged(); } } else { if (!other.blocks_.isEmpty()) { if (blocksBuilder_.isEmpty()) { blocksBuilder_.dispose(); blocksBuilder_ = null; blocks_ = other.blocks_; bitField0_ = (bitField0_ & ~0x00000002); blocksBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getBlocksFieldBuilder() : null; } else { blocksBuilder_.addAllMessages(other.blocks_); } } } if (other.hasUnderConstruction()) { setUnderConstruction(other.getUnderConstruction()); } if (other.hasLastBlock()) { mergeLastBlock(other.getLastBlock()); } if (other.hasIsLastBlockComplete()) { setIsLastBlockComplete(other.getIsLastBlockComplete()); } if (other.hasFileEncryptionInfo()) { mergeFileEncryptionInfo(other.getFileEncryptionInfo()); } if (other.hasEcPolicy()) { mergeEcPolicy(other.getEcPolicy()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasFileLength()) { return false; } if (!hasUnderConstruction()) { return false; } if (!hasIsLastBlockComplete()) { return false; } for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { return false; } } if (hasLastBlock()) { if (!getLastBlock().isInitialized()) { return false; } } if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo().isInitialized()) { return false; } } if (hasEcPolicy()) { if (!getEcPolicy().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { fileLength_ = input.readUInt64(); bitField0_ |= 0x00000001; break; } // case 8 case 18: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry); if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.add(m); } else { blocksBuilder_.addMessage(m); } break; } // case 18 case 24: { underConstruction_ = input.readBool(); bitField0_ |= 0x00000004; break; } // case 24 case 34: { input.readMessage( getLastBlockFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000008; break; } // case 34 case 40: { isLastBlockComplete_ = input.readBool(); bitField0_ |= 0x00000010; break; } // case 40 case 50: { input.readMessage( getFileEncryptionInfoFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000020; break; } // case 50 case 58: { input.readMessage( getEcPolicyFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000040; break; } // case 58 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private long fileLength_ ; /** * required uint64 fileLength = 1; * @return Whether the fileLength field is set. */ @java.lang.Override public boolean hasFileLength() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 fileLength = 1; * @return The fileLength. */ @java.lang.Override public long getFileLength() { return fileLength_; } /** * required uint64 fileLength = 1; * @param value The fileLength to set. * @return This builder for chaining. 
*/ public Builder setFileLength(long value) { fileLength_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required uint64 fileLength = 1; * @return This builder for chaining. */ public Builder clearFileLength() { bitField0_ = (bitField0_ & ~0x00000001); fileLength_ = 0L; onChanged(); return this; } private java.util.List blocks_ = java.util.Collections.emptyList(); private void ensureBlocksIsMutable() { if (!((bitField0_ & 0x00000002) != 0)) { blocks_ = new java.util.ArrayList(blocks_); bitField0_ |= 0x00000002; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blocksBuilder_; /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public java.util.List getBlocksList() { if (blocksBuilder_ == null) { return java.util.Collections.unmodifiableList(blocks_); } else { return blocksBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public int getBlocksCount() { if (blocksBuilder_ == null) { return blocks_.size(); } else { return blocksBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { if (blocksBuilder_ == null) { return blocks_.get(index); } else { return blocksBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder setBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.set(index, value); onChanged(); } else { blocksBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder setBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.set(index, builderForValue.build()); onChanged(); } else { blocksBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.add(value); onChanged(); } else { blocksBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.add(index, value); onChanged(); } else { blocksBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addBlocks( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.add(builderForValue.build()); onChanged(); } else { blocksBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addBlocks( int 
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.add(index, builderForValue.build()); onChanged(); } else { blocksBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addAllBlocks( java.lang.Iterable values) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, blocks_); onChanged(); } else { blocksBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder clearBlocks() { if (blocksBuilder_ == null) { blocks_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); } else { blocksBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder removeBlocks(int index) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.remove(index); onChanged(); } else { blocksBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder( int index) { return getBlocksFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( int index) { if (blocksBuilder_ == null) { return blocks_.get(index); } else { return blocksBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public java.util.List getBlocksOrBuilderList() { if (blocksBuilder_ != null) { return blocksBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(blocks_); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() { return getBlocksFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder( int index) { return getBlocksFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public java.util.List getBlocksBuilderList() { return getBlocksFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> getBlocksFieldBuilder() { if (blocksBuilder_ == null) { blocksBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( blocks_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); blocks_ = null; } return blocksBuilder_; } private boolean underConstruction_ ; /** * required bool 
underConstruction = 3; * @return Whether the underConstruction field is set. */ @java.lang.Override public boolean hasUnderConstruction() { return ((bitField0_ & 0x00000004) != 0); } /** * required bool underConstruction = 3; * @return The underConstruction. */ @java.lang.Override public boolean getUnderConstruction() { return underConstruction_; } /** * required bool underConstruction = 3; * @param value The underConstruction to set. * @return This builder for chaining. */ public Builder setUnderConstruction(boolean value) { underConstruction_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required bool underConstruction = 3; * @return This builder for chaining. */ public Builder clearUnderConstruction() { bitField0_ = (bitField0_ & ~0x00000004); underConstruction_ = false; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> lastBlockBuilder_; /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; * @return Whether the lastBlock field is set. */ public boolean hasLastBlock() { return ((bitField0_ & 0x00000008) != 0); } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; * @return The lastBlock. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() { if (lastBlockBuilder_ == null) { return lastBlock_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance() : lastBlock_; } else { return lastBlockBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public Builder setLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (lastBlockBuilder_ == null) { if (value == null) { throw new NullPointerException(); } lastBlock_ = value; } else { lastBlockBuilder_.setMessage(value); } bitField0_ |= 0x00000008; onChanged(); return this; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public Builder setLastBlock( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (lastBlockBuilder_ == null) { lastBlock_ = builderForValue.build(); } else { lastBlockBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; onChanged(); return this; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public Builder mergeLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (lastBlockBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0) && lastBlock_ != null && lastBlock_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) { getLastBlockBuilder().mergeFrom(value); } else { lastBlock_ = value; } } else { lastBlockBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; onChanged(); return this; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public Builder clearLastBlock() { bitField0_ = (bitField0_ & ~0x00000008); lastBlock_ = null; if (lastBlockBuilder_ != null) { lastBlockBuilder_.dispose(); lastBlockBuilder_ = null; } onChanged(); return this; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getLastBlockBuilder() 
{ bitField0_ |= 0x00000008; onChanged(); return getLastBlockFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() { if (lastBlockBuilder_ != null) { return lastBlockBuilder_.getMessageOrBuilder(); } else { return lastBlock_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance() : lastBlock_; } } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> getLastBlockFieldBuilder() { if (lastBlockBuilder_ == null) { lastBlockBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( getLastBlock(), getParentForChildren(), isClean()); lastBlock_ = null; } return lastBlockBuilder_; } private boolean isLastBlockComplete_ ; /** * required bool isLastBlockComplete = 5; * @return Whether the isLastBlockComplete field is set. */ @java.lang.Override public boolean hasIsLastBlockComplete() { return ((bitField0_ & 0x00000010) != 0); } /** * required bool isLastBlockComplete = 5; * @return The isLastBlockComplete. */ @java.lang.Override public boolean getIsLastBlockComplete() { return isLastBlockComplete_; } /** * required bool isLastBlockComplete = 5; * @param value The isLastBlockComplete to set. * @return This builder for chaining. */ public Builder setIsLastBlockComplete(boolean value) { isLastBlockComplete_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * required bool isLastBlockComplete = 5; * @return This builder for chaining. */ public Builder clearIsLastBlockComplete() { bitField0_ = (bitField0_ & ~0x00000010); isLastBlockComplete_ = false; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> fileEncryptionInfoBuilder_; /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; * @return Whether the fileEncryptionInfo field is set. */ public boolean hasFileEncryptionInfo() { return ((bitField0_ & 0x00000020) != 0); } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; * @return The fileEncryptionInfo. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() { if (fileEncryptionInfoBuilder_ == null) { return fileEncryptionInfo_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } else { return fileEncryptionInfoBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public Builder setFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) { if (fileEncryptionInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } fileEncryptionInfo_ = value; } else { fileEncryptionInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000020; onChanged(); return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public Builder setFileEncryptionInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder builderForValue) { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = builderForValue.build(); } else { fileEncryptionInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000020; onChanged(); return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public Builder mergeFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) { if (fileEncryptionInfoBuilder_ == null) { if (((bitField0_ & 0x00000020) != 0) && fileEncryptionInfo_ != null && fileEncryptionInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) { getFileEncryptionInfoBuilder().mergeFrom(value); } else { fileEncryptionInfo_ = value; } } else { fileEncryptionInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000020; onChanged(); return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public Builder clearFileEncryptionInfo() { bitField0_ = (bitField0_ & ~0x00000020); fileEncryptionInfo_ = null; if (fileEncryptionInfoBuilder_ != null) { fileEncryptionInfoBuilder_.dispose(); fileEncryptionInfoBuilder_ = null; } onChanged(); return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder getFileEncryptionInfoBuilder() { bitField0_ |= 0x00000020; onChanged(); return getFileEncryptionInfoFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() { if (fileEncryptionInfoBuilder_ != null) { return fileEncryptionInfoBuilder_.getMessageOrBuilder(); } else { return fileEncryptionInfo_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> getFileEncryptionInfoFieldBuilder() { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>( getFileEncryptionInfo(), getParentForChildren(), isClean()); fileEncryptionInfo_ = null; } return fileEncryptionInfoBuilder_; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> ecPolicyBuilder_; /** *
       * Optional field for erasure coding
       * 
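        * Illustrative builder pattern (not part of the generated file), where
        * "ecPolicy" is an assumed, already-built ErasureCodingPolicyProto:
        *   builder.setEcPolicy(ecPolicy);    // replaces any previously set value
        *   builder.mergeEcPolicy(ecPolicy);  // merges into an existing value, field by field
        *   builder.clearEcPolicy();          // unsets the field again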
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; * @return Whether the ecPolicy field is set. */ public boolean hasEcPolicy() { return ((bitField0_ & 0x00000040) != 0); } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; * @return The ecPolicy. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() { if (ecPolicyBuilder_ == null) { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } else { return ecPolicyBuilder_.getMessage(); } } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public Builder setEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (ecPolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ecPolicy_ = value; } else { ecPolicyBuilder_.setMessage(value); } bitField0_ |= 0x00000040; onChanged(); return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public Builder setEcPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) { if (ecPolicyBuilder_ == null) { ecPolicy_ = builderForValue.build(); } else { ecPolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000040; onChanged(); return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public Builder mergeEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (ecPolicyBuilder_ == null) { if (((bitField0_ & 0x00000040) != 0) && ecPolicy_ != null && ecPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) { getEcPolicyBuilder().mergeFrom(value); } else { ecPolicy_ = value; } } else { ecPolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000040; onChanged(); return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public Builder clearEcPolicy() { bitField0_ = (bitField0_ & ~0x00000040); ecPolicy_ = null; if (ecPolicyBuilder_ != null) { ecPolicyBuilder_.dispose(); ecPolicyBuilder_ = null; } onChanged(); return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getEcPolicyBuilder() { bitField0_ |= 0x00000040; onChanged(); return getEcPolicyFieldBuilder().getBuilder(); } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() { if (ecPolicyBuilder_ != null) { return ecPolicyBuilder_.getMessageOrBuilder(); } else { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> getEcPolicyFieldBuilder() { if (ecPolicyBuilder_ == null) { ecPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>( getEcPolicy(), getParentForChildren(), isClean()); ecPolicy_ = null; } return ecPolicyBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.LocatedBlocksProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.LocatedBlocksProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public LocatedBlocksProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ECSchemaOptionEntryProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ECSchemaOptionEntryProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string key = 1; * @return Whether the key field is set. */ boolean hasKey(); /** * required string key = 1; * @return The key. */ java.lang.String getKey(); /** * required string key = 1; * @return The bytes for key. 
*/ org.apache.hadoop.thirdparty.protobuf.ByteString getKeyBytes(); /** * required string value = 2; * @return Whether the value field is set. */ boolean hasValue(); /** * required string value = 2; * @return The value. */ java.lang.String getValue(); /** * required string value = 2; * @return The bytes for value. */ org.apache.hadoop.thirdparty.protobuf.ByteString getValueBytes(); } /** *
   **
   * ECSchema options entry
   * 
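    * Illustrative usage sketch (not part of the generated file); key and value
    * are both required strings, so the standard generated setters must be called
    * before build(). The key and value shown are placeholders only:
    *   HdfsProtos.ECSchemaOptionEntryProto entry =
    *       HdfsProtos.ECSchemaOptionEntryProto.newBuilder()
    *           .setKey("exampleOption")
    *           .setValue("exampleValue")
    *           .build();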
* * Protobuf type {@code hadoop.hdfs.ECSchemaOptionEntryProto} */ public static final class ECSchemaOptionEntryProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ECSchemaOptionEntryProto) ECSchemaOptionEntryProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ECSchemaOptionEntryProto.newBuilder() to construct. private ECSchemaOptionEntryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ECSchemaOptionEntryProto() { key_ = ""; value_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new ECSchemaOptionEntryProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder.class); } private int bitField0_; public static final int KEY_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object key_ = ""; /** * required string key = 1; * @return Whether the key field is set. */ @java.lang.Override public boolean hasKey() { return ((bitField0_ & 0x00000001) != 0); } /** * required string key = 1; * @return The key. */ @java.lang.Override public java.lang.String getKey() { java.lang.Object ref = key_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { key_ = s; } return s; } } /** * required string key = 1; * @return The bytes for key. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyBytes() { java.lang.Object ref = key_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); key_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int VALUE_FIELD_NUMBER = 2; @SuppressWarnings("serial") private volatile java.lang.Object value_ = ""; /** * required string value = 2; * @return Whether the value field is set. */ @java.lang.Override public boolean hasValue() { return ((bitField0_ & 0x00000002) != 0); } /** * required string value = 2; * @return The value. 
*/ @java.lang.Override public java.lang.String getValue() { java.lang.Object ref = value_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { value_ = s; } return s; } } /** * required string value = 2; * @return The bytes for value. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getValueBytes() { java.lang.Object ref = value_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); value_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasKey()) { memoizedIsInitialized = 0; return false; } if (!hasValue()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, key_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, value_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, key_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, value_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto) obj; if (hasKey() != other.hasKey()) return false; if (hasKey()) { if (!getKey() .equals(other.getKey())) return false; } if (hasValue() != other.hasValue()) return false; if (hasValue()) { if (!getValue() .equals(other.getValue())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasKey()) { hash = (37 * hash) + KEY_FIELD_NUMBER; hash = (53 * hash) + getKey().hashCode(); } if (hasValue()) { hash = (37 * hash) + VALUE_FIELD_NUMBER; hash = (53 * hash) + getValue().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } 
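    // -------------------------------------------------------------------------
    // Editorial illustration, not emitted by protoc: a minimal sketch of how a
    // caller typically builds and round-trips an options entry through the
    // protobuf wire format. The method name and the literal key/value strings
    // are hypothetical placeholders.
    // -------------------------------------------------------------------------
    @SuppressWarnings("unused")
    private static ECSchemaOptionEntryProto exampleOptionEntryRoundTrip()
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      ECSchemaOptionEntryProto entry = ECSchemaOptionEntryProto.newBuilder()
          .setKey("exampleOptionKey")     // both key and value are required fields
          .setValue("exampleOptionValue")
          .build();                       // build() fails if a required field is unset
      byte[] bytes = entry.toByteArray(); // serialize to the wire format
      return ECSchemaOptionEntryProto.parseFrom(bytes); // generated parseFrom(byte[]) overload
    }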
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static 
Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * ECSchema options entry
     * 
* * Protobuf type {@code hadoop.hdfs.ECSchemaOptionEntryProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ECSchemaOptionEntryProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; key_ = ""; value_ = ""; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.key_ = key_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.value_ = value_; to_bitField0_ |= 0x00000002; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder 
setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance()) return this; if (other.hasKey()) { key_ = other.key_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasValue()) { value_ = other.value_; bitField0_ |= 0x00000002; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasKey()) { return false; } if (!hasValue()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { key_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 18: { value_ = input.readBytes(); bitField0_ |= 0x00000002; break; } // case 18 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object key_ = ""; /** * required string key = 1; * @return Whether the key field is set. */ public boolean hasKey() { return ((bitField0_ & 0x00000001) != 0); } /** * required string key = 1; * @return The key. */ public java.lang.String getKey() { java.lang.Object ref = key_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { key_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string key = 1; * @return The bytes for key. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyBytes() { java.lang.Object ref = key_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); key_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string key = 1; * @param value The key to set. * @return This builder for chaining. 
*/ public Builder setKey( java.lang.String value) { if (value == null) { throw new NullPointerException(); } key_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required string key = 1; * @return This builder for chaining. */ public Builder clearKey() { key_ = getDefaultInstance().getKey(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * required string key = 1; * @param value The bytes for key to set. * @return This builder for chaining. */ public Builder setKeyBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } key_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private java.lang.Object value_ = ""; /** * required string value = 2; * @return Whether the value field is set. */ public boolean hasValue() { return ((bitField0_ & 0x00000002) != 0); } /** * required string value = 2; * @return The value. */ public java.lang.String getValue() { java.lang.Object ref = value_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { value_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string value = 2; * @return The bytes for value. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getValueBytes() { java.lang.Object ref = value_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); value_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string value = 2; * @param value The value to set. * @return This builder for chaining. */ public Builder setValue( java.lang.String value) { if (value == null) { throw new NullPointerException(); } value_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required string value = 2; * @return This builder for chaining. */ public Builder clearValue() { value_ = getDefaultInstance().getValue(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } /** * required string value = 2; * @param value The bytes for value to set. * @return This builder for chaining. 
*/ public Builder setValueBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } value_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ECSchemaOptionEntryProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ECSchemaOptionEntryProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ECSchemaOptionEntryProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ECSchemaProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ECSchemaProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string codecName = 1; * @return Whether the codecName field is set. */ boolean hasCodecName(); /** * required string codecName = 1; * @return The codecName. */ java.lang.String getCodecName(); /** * required string codecName = 1; * @return The bytes for codecName. */ org.apache.hadoop.thirdparty.protobuf.ByteString getCodecNameBytes(); /** * required uint32 dataUnits = 2; * @return Whether the dataUnits field is set. */ boolean hasDataUnits(); /** * required uint32 dataUnits = 2; * @return The dataUnits. */ int getDataUnits(); /** * required uint32 parityUnits = 3; * @return Whether the parityUnits field is set. */ boolean hasParityUnits(); /** * required uint32 parityUnits = 3; * @return The parityUnits. 
*/ int getParityUnits(); /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ java.util.List getOptionsList(); /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getOptions(int index); /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ int getOptionsCount(); /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ java.util.List getOptionsOrBuilderList(); /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder getOptionsOrBuilder( int index); } /** *
   **
   * ECSchema for erasure coding
   * 
* * Protobuf type {@code hadoop.hdfs.ECSchemaProto} */ public static final class ECSchemaProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ECSchemaProto) ECSchemaProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ECSchemaProto.newBuilder() to construct. private ECSchemaProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ECSchemaProto() { codecName_ = ""; options_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new ECSchemaProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder.class); } private int bitField0_; public static final int CODECNAME_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object codecName_ = ""; /** * required string codecName = 1; * @return Whether the codecName field is set. */ @java.lang.Override public boolean hasCodecName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string codecName = 1; * @return The codecName. */ @java.lang.Override public java.lang.String getCodecName() { java.lang.Object ref = codecName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { codecName_ = s; } return s; } } /** * required string codecName = 1; * @return The bytes for codecName. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getCodecNameBytes() { java.lang.Object ref = codecName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); codecName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int DATAUNITS_FIELD_NUMBER = 2; private int dataUnits_ = 0; /** * required uint32 dataUnits = 2; * @return Whether the dataUnits field is set. */ @java.lang.Override public boolean hasDataUnits() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 dataUnits = 2; * @return The dataUnits. */ @java.lang.Override public int getDataUnits() { return dataUnits_; } public static final int PARITYUNITS_FIELD_NUMBER = 3; private int parityUnits_ = 0; /** * required uint32 parityUnits = 3; * @return Whether the parityUnits field is set. */ @java.lang.Override public boolean hasParityUnits() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint32 parityUnits = 3; * @return The parityUnits. 
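     *
     * <p>Editorial note, not generated by protoc: an illustrative sketch of how a
     * schema message is typically assembled. The codec name and unit counts are
     * hypothetical placeholders; {@code entry} is an ECSchemaOptionEntryProto
     * built as sketched earlier.
     * <pre>{@code
     * ECSchemaProto schema = ECSchemaProto.newBuilder()
     *     .setCodecName("rs")        // required codec name
     *     .setDataUnits(6)           // required number of data units
     *     .setParityUnits(3)         // required number of parity units
     *     .addOptions(entry)         // optional, repeated codec options
     *     .build();
     * }</pre>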
*/ @java.lang.Override public int getParityUnits() { return parityUnits_; } public static final int OPTIONS_FIELD_NUMBER = 4; @SuppressWarnings("serial") private java.util.List options_; /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ @java.lang.Override public java.util.List getOptionsList() { return options_; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ @java.lang.Override public java.util.List getOptionsOrBuilderList() { return options_; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ @java.lang.Override public int getOptionsCount() { return options_.size(); } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getOptions(int index) { return options_.get(index); } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder getOptionsOrBuilder( int index) { return options_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasCodecName()) { memoizedIsInitialized = 0; return false; } if (!hasDataUnits()) { memoizedIsInitialized = 0; return false; } if (!hasParityUnits()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getOptionsCount(); i++) { if (!getOptions(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, codecName_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, dataUnits_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt32(3, parityUnits_); } for (int i = 0; i < options_.size(); i++) { output.writeMessage(4, options_.get(i)); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, codecName_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, dataUnits_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(3, parityUnits_); } for (int i = 0; i < options_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, options_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto) obj; if (hasCodecName() != other.hasCodecName()) return false; if (hasCodecName()) { if (!getCodecName() .equals(other.getCodecName())) return false; } if (hasDataUnits() != 
other.hasDataUnits()) return false; if (hasDataUnits()) { if (getDataUnits() != other.getDataUnits()) return false; } if (hasParityUnits() != other.hasParityUnits()) return false; if (hasParityUnits()) { if (getParityUnits() != other.getParityUnits()) return false; } if (!getOptionsList() .equals(other.getOptionsList())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasCodecName()) { hash = (37 * hash) + CODECNAME_FIELD_NUMBER; hash = (53 * hash) + getCodecName().hashCode(); } if (hasDataUnits()) { hash = (37 * hash) + DATAUNITS_FIELD_NUMBER; hash = (53 * hash) + getDataUnits(); } if (hasParityUnits()) { hash = (37 * hash) + PARITYUNITS_FIELD_NUMBER; hash = (53 * hash) + getParityUnits(); } if (getOptionsCount() > 0) { hash = (37 * hash) + OPTIONS_FIELD_NUMBER; hash = (53 * hash) + getOptionsList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseDelimitedFrom(java.io.InputStream input) throws 
java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * ECSchema for erasure coding
     * 
* * Protobuf type {@code hadoop.hdfs.ECSchemaProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ECSchemaProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; codecName_ = ""; dataUnits_ = 0; parityUnits_ = 0; if (optionsBuilder_ == null) { options_ = java.util.Collections.emptyList(); } else { options_ = null; optionsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto result) { if (optionsBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0)) { options_ = java.util.Collections.unmodifiableList(options_); bitField0_ = (bitField0_ & ~0x00000008); } result.options_ = options_; } else { result.options_ = optionsBuilder_.build(); } } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.codecName_ = codecName_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.dataUnits_ = dataUnits_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.parityUnits_ = parityUnits_; to_bitField0_ |= 0x00000004; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { 
return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance()) return this; if (other.hasCodecName()) { codecName_ = other.codecName_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasDataUnits()) { setDataUnits(other.getDataUnits()); } if (other.hasParityUnits()) { setParityUnits(other.getParityUnits()); } if (optionsBuilder_ == null) { if (!other.options_.isEmpty()) { if (options_.isEmpty()) { options_ = other.options_; bitField0_ = (bitField0_ & ~0x00000008); } else { ensureOptionsIsMutable(); options_.addAll(other.options_); } onChanged(); } } else { if (!other.options_.isEmpty()) { if (optionsBuilder_.isEmpty()) { optionsBuilder_.dispose(); optionsBuilder_ = null; options_ = other.options_; bitField0_ = (bitField0_ & ~0x00000008); optionsBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getOptionsFieldBuilder() : null; } else { optionsBuilder_.addAllMessages(other.options_); } } } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasCodecName()) { return false; } if (!hasDataUnits()) { return false; } if (!hasParityUnits()) { return false; } for (int i = 0; i < getOptionsCount(); i++) { if (!getOptions(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { codecName_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 16: { dataUnits_ = input.readUInt32(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { parityUnits_ = input.readUInt32(); bitField0_ |= 0x00000004; break; } // case 24 case 34: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.PARSER, extensionRegistry); if (optionsBuilder_ == null) { ensureOptionsIsMutable(); options_.add(m); } else { optionsBuilder_.addMessage(m); } break; } // case 34 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object codecName_ = ""; /** * required string codecName = 1; * @return Whether the codecName field is set. */ public boolean hasCodecName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string codecName = 1; * @return The codecName. */ public java.lang.String getCodecName() { java.lang.Object ref = codecName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { codecName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string codecName = 1; * @return The bytes for codecName. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getCodecNameBytes() { java.lang.Object ref = codecName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); codecName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string codecName = 1; * @param value The codecName to set. * @return This builder for chaining. */ public Builder setCodecName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } codecName_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required string codecName = 1; * @return This builder for chaining. 
*/ public Builder clearCodecName() { codecName_ = getDefaultInstance().getCodecName(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * required string codecName = 1; * @param value The bytes for codecName to set. * @return This builder for chaining. */ public Builder setCodecNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } codecName_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private int dataUnits_ ; /** * required uint32 dataUnits = 2; * @return Whether the dataUnits field is set. */ @java.lang.Override public boolean hasDataUnits() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 dataUnits = 2; * @return The dataUnits. */ @java.lang.Override public int getDataUnits() { return dataUnits_; } /** * required uint32 dataUnits = 2; * @param value The dataUnits to set. * @return This builder for chaining. */ public Builder setDataUnits(int value) { dataUnits_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required uint32 dataUnits = 2; * @return This builder for chaining. */ public Builder clearDataUnits() { bitField0_ = (bitField0_ & ~0x00000002); dataUnits_ = 0; onChanged(); return this; } private int parityUnits_ ; /** * required uint32 parityUnits = 3; * @return Whether the parityUnits field is set. */ @java.lang.Override public boolean hasParityUnits() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint32 parityUnits = 3; * @return The parityUnits. */ @java.lang.Override public int getParityUnits() { return parityUnits_; } /** * required uint32 parityUnits = 3; * @param value The parityUnits to set. * @return This builder for chaining. */ public Builder setParityUnits(int value) { parityUnits_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required uint32 parityUnits = 3; * @return This builder for chaining. 
*/ public Builder clearParityUnits() { bitField0_ = (bitField0_ & ~0x00000004); parityUnits_ = 0; onChanged(); return this; } private java.util.List options_ = java.util.Collections.emptyList(); private void ensureOptionsIsMutable() { if (!((bitField0_ & 0x00000008) != 0)) { options_ = new java.util.ArrayList(options_); bitField0_ |= 0x00000008; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder> optionsBuilder_; /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public java.util.List getOptionsList() { if (optionsBuilder_ == null) { return java.util.Collections.unmodifiableList(options_); } else { return optionsBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public int getOptionsCount() { if (optionsBuilder_ == null) { return options_.size(); } else { return optionsBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getOptions(int index) { if (optionsBuilder_ == null) { return options_.get(index); } else { return optionsBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder setOptions( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto value) { if (optionsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureOptionsIsMutable(); options_.set(index, value); onChanged(); } else { optionsBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder setOptions( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder builderForValue) { if (optionsBuilder_ == null) { ensureOptionsIsMutable(); options_.set(index, builderForValue.build()); onChanged(); } else { optionsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder addOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto value) { if (optionsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureOptionsIsMutable(); options_.add(value); onChanged(); } else { optionsBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder addOptions( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto value) { if (optionsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureOptionsIsMutable(); options_.add(index, value); onChanged(); } else { optionsBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder addOptions( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder builderForValue) { if (optionsBuilder_ == null) { ensureOptionsIsMutable(); options_.add(builderForValue.build()); onChanged(); } else { optionsBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder addOptions( int index, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder builderForValue) { if (optionsBuilder_ == null) { ensureOptionsIsMutable(); options_.add(index, builderForValue.build()); onChanged(); } else { optionsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder addAllOptions( java.lang.Iterable values) { if (optionsBuilder_ == null) { ensureOptionsIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, options_); onChanged(); } else { optionsBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder clearOptions() { if (optionsBuilder_ == null) { options_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); onChanged(); } else { optionsBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder removeOptions(int index) { if (optionsBuilder_ == null) { ensureOptionsIsMutable(); options_.remove(index); onChanged(); } else { optionsBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder getOptionsBuilder( int index) { return getOptionsFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder getOptionsOrBuilder( int index) { if (optionsBuilder_ == null) { return options_.get(index); } else { return optionsBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public java.util.List getOptionsOrBuilderList() { if (optionsBuilder_ != null) { return optionsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(options_); } } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder addOptionsBuilder() { return getOptionsFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder addOptionsBuilder( int index) { return getOptionsFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public java.util.List getOptionsBuilderList() { return getOptionsFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder> getOptionsFieldBuilder() { if (optionsBuilder_ == null) { optionsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder>( options_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); options_ = null; } return optionsBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ECSchemaProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ECSchemaProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ECSchemaProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ErasureCodingPolicyProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ErasureCodingPolicyProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional string name = 1; * @return Whether the name field is set. */ boolean hasName(); /** * optional string name = 1; * @return The name. */ java.lang.String getName(); /** * optional string name = 1; * @return The bytes for name. */ org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes(); /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; * @return Whether the schema field is set. */ boolean hasSchema(); /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; * @return The schema. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getSchema(); /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder getSchemaOrBuilder(); /** * optional uint32 cellSize = 3; * @return Whether the cellSize field is set. */ boolean hasCellSize(); /** * optional uint32 cellSize = 3; * @return The cellSize. */ int getCellSize(); /** *
     * Actually a byte - only 8 bits used
     * 
* * required uint32 id = 4; * @return Whether the id field is set. */ boolean hasId(); /** *
     * Actually a byte - only 8 bits used
     * 
* * required uint32 id = 4; * @return The id. */ int getId(); /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; * @return Whether the state field is set. */ boolean hasState(); /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; * @return The state. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState getState(); } /** * Protobuf type {@code hadoop.hdfs.ErasureCodingPolicyProto} */ public static final class ErasureCodingPolicyProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ErasureCodingPolicyProto) ErasureCodingPolicyProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ErasureCodingPolicyProto.newBuilder() to construct. private ErasureCodingPolicyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ErasureCodingPolicyProto() { name_ = ""; state_ = 2; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new ErasureCodingPolicyProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder.class); } private int bitField0_; public static final int NAME_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object name_ = ""; /** * optional string name = 1; * @return Whether the name field is set. */ @java.lang.Override public boolean hasName() { return ((bitField0_ & 0x00000001) != 0); } /** * optional string name = 1; * @return The name. */ @java.lang.Override public java.lang.String getName() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } } /** * optional string name = 1; * @return The bytes for name. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int SCHEMA_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto schema_; /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; * @return Whether the schema field is set. 
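     *
     * <p>Editorial note, not generated by protoc: an illustrative sketch of how a
     * policy message is typically assembled around such a schema. The policy name,
     * cell size and id are hypothetical placeholders; {@code schema} is an
     * ECSchemaProto built as sketched earlier.
     * <pre>{@code
     * ErasureCodingPolicyProto policy = ErasureCodingPolicyProto.newBuilder()
     *     .setName("example-policy")   // optional display name
     *     .setSchema(schema)           // optional embedded ECSchemaProto
     *     .setCellSize(1024 * 1024)    // optional striping cell size in bytes
     *     .setId(1)                    // required; only the low 8 bits are used
     *     .build();                    // state defaults to ENABLED when unset
     * }</pre>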
*/ @java.lang.Override public boolean hasSchema() { return ((bitField0_ & 0x00000002) != 0); } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; * @return The schema. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getSchema() { return schema_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance() : schema_; } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder getSchemaOrBuilder() { return schema_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance() : schema_; } public static final int CELLSIZE_FIELD_NUMBER = 3; private int cellSize_ = 0; /** * optional uint32 cellSize = 3; * @return Whether the cellSize field is set. */ @java.lang.Override public boolean hasCellSize() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint32 cellSize = 3; * @return The cellSize. */ @java.lang.Override public int getCellSize() { return cellSize_; } public static final int ID_FIELD_NUMBER = 4; private int id_ = 0; /** *
     * Actually a byte - only 8 bits used
     * 
* * required uint32 id = 4; * @return Whether the id field is set. */ @java.lang.Override public boolean hasId() { return ((bitField0_ & 0x00000008) != 0); } /** *
     * Actually a byte - only 8 bits used
     * 
* * required uint32 id = 4; * @return The id. */ @java.lang.Override public int getId() { return id_; } public static final int STATE_FIELD_NUMBER = 5; private int state_ = 2; /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; * @return Whether the state field is set. */ @java.lang.Override public boolean hasState() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; * @return The state. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState getState() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.forNumber(state_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.ENABLED : result; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasId()) { memoizedIsInitialized = 0; return false; } if (hasSchema()) { if (!getSchema().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, name_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(2, getSchema()); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt32(3, cellSize_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt32(4, id_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeEnum(5, state_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, name_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(2, getSchema()); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(3, cellSize_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(4, id_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(5, state_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto) obj; if (hasName() != other.hasName()) return false; if (hasName()) { if (!getName() .equals(other.getName())) return false; } if (hasSchema() != other.hasSchema()) return false; if (hasSchema()) { if (!getSchema() .equals(other.getSchema())) return false; } if (hasCellSize() != other.hasCellSize()) return false; if (hasCellSize()) { if 
(getCellSize() != other.getCellSize()) return false; } if (hasId() != other.hasId()) return false; if (hasId()) { if (getId() != other.getId()) return false; } if (hasState() != other.hasState()) return false; if (hasState()) { if (state_ != other.state_) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } if (hasSchema()) { hash = (37 * hash) + SCHEMA_FIELD_NUMBER; hash = (53 * hash) + getSchema().hashCode(); } if (hasCellSize()) { hash = (37 * hash) + CELLSIZE_FIELD_NUMBER; hash = (53 * hash) + getCellSize(); } if (hasId()) { hash = (37 * hash) + ID_FIELD_NUMBER; hash = (53 * hash) + getId(); } if (hasState()) { hash = (37 * hash) + STATE_FIELD_NUMBER; hash = (53 * hash) + state_; } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto 
parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ErasureCodingPolicyProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ErasureCodingPolicyProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getSchemaFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; name_ = ""; schema_ = null; if 
(schemaBuilder_ != null) { schemaBuilder_.dispose(); schemaBuilder_ = null; } cellSize_ = 0; id_ = 0; state_ = 2; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.name_ = name_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.schema_ = schemaBuilder_ == null ? schema_ : schemaBuilder_.build(); to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.cellSize_ = cellSize_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.id_ = id_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.state_ = state_; to_bitField0_ |= 0x00000010; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) return this; if (other.hasName()) { name_ = other.name_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasSchema()) { mergeSchema(other.getSchema()); } if (other.hasCellSize()) { setCellSize(other.getCellSize()); } if (other.hasId()) { setId(other.getId()); } if (other.hasState()) { setState(other.getState()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasId()) { return false; } if (hasSchema()) { if (!getSchema().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { name_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 18: { input.readMessage( getSchemaFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000002; break; } // case 18 case 24: { cellSize_ = input.readUInt32(); bitField0_ |= 0x00000004; break; } // case 24 case 32: { id_ = input.readUInt32(); bitField0_ |= 0x00000008; break; } // case 32 case 40: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(5, tmpRaw); } else { state_ = tmpRaw; bitField0_ |= 0x00000010; } break; } // case 40 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object name_ = ""; /** * optional string name = 1; * @return Whether the name field is set. */ public boolean hasName() { return ((bitField0_ & 0x00000001) != 0); } /** * optional string name = 1; * @return The name. */ public java.lang.String getName() { java.lang.Object ref = name_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string name = 1; * @return The bytes for name. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string name = 1; * @param value The name to set. * @return This builder for chaining. 
*/ public Builder setName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } name_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * optional string name = 1; * @return This builder for chaining. */ public Builder clearName() { name_ = getDefaultInstance().getName(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * optional string name = 1; * @param value The bytes for name to set. * @return This builder for chaining. */ public Builder setNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } name_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto schema_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder> schemaBuilder_; /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; * @return Whether the schema field is set. */ public boolean hasSchema() { return ((bitField0_ & 0x00000002) != 0); } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; * @return The schema. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getSchema() { if (schemaBuilder_ == null) { return schema_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance() : schema_; } else { return schemaBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public Builder setSchema(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto value) { if (schemaBuilder_ == null) { if (value == null) { throw new NullPointerException(); } schema_ = value; } else { schemaBuilder_.setMessage(value); } bitField0_ |= 0x00000002; onChanged(); return this; } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public Builder setSchema( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder builderForValue) { if (schemaBuilder_ == null) { schema_ = builderForValue.build(); } else { schemaBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; onChanged(); return this; } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public Builder mergeSchema(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto value) { if (schemaBuilder_ == null) { if (((bitField0_ & 0x00000002) != 0) && schema_ != null && schema_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance()) { getSchemaBuilder().mergeFrom(value); } else { schema_ = value; } } else { schemaBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; onChanged(); return this; } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public Builder clearSchema() { bitField0_ = (bitField0_ & ~0x00000002); schema_ = null; if (schemaBuilder_ != null) { schemaBuilder_.dispose(); schemaBuilder_ = null; } onChanged(); return this; } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder getSchemaBuilder() { bitField0_ |= 0x00000002; onChanged(); return getSchemaFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder getSchemaOrBuilder() { if (schemaBuilder_ != null) { return 
schemaBuilder_.getMessageOrBuilder(); } else { return schema_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance() : schema_; } } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder> getSchemaFieldBuilder() { if (schemaBuilder_ == null) { schemaBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder>( getSchema(), getParentForChildren(), isClean()); schema_ = null; } return schemaBuilder_; } private int cellSize_ ; /** * optional uint32 cellSize = 3; * @return Whether the cellSize field is set. */ @java.lang.Override public boolean hasCellSize() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint32 cellSize = 3; * @return The cellSize. */ @java.lang.Override public int getCellSize() { return cellSize_; } /** * optional uint32 cellSize = 3; * @param value The cellSize to set. * @return This builder for chaining. */ public Builder setCellSize(int value) { cellSize_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * optional uint32 cellSize = 3; * @return This builder for chaining. */ public Builder clearCellSize() { bitField0_ = (bitField0_ & ~0x00000004); cellSize_ = 0; onChanged(); return this; } private int id_ ; /** *
       * Actually a byte - only 8 bits used
       * 
* * required uint32 id = 4; * @return Whether the id field is set. */ @java.lang.Override public boolean hasId() { return ((bitField0_ & 0x00000008) != 0); } /** *
       * Actually a byte - only 8 bits used
       * 
* * required uint32 id = 4; * @return The id. */ @java.lang.Override public int getId() { return id_; } /** *
       * Actually a byte - only 8 bits used
       * 
* * required uint32 id = 4; * @param value The id to set. * @return This builder for chaining. */ public Builder setId(int value) { id_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** *
       * Actually a byte - only 8 bits used
       * 
* * required uint32 id = 4; * @return This builder for chaining. */ public Builder clearId() { bitField0_ = (bitField0_ & ~0x00000008); id_ = 0; onChanged(); return this; } private int state_ = 2; /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; * @return Whether the state field is set. */ @java.lang.Override public boolean hasState() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; * @return The state. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState getState() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.forNumber(state_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.ENABLED : result; } /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; * @param value The state to set. * @return This builder for chaining. */ public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; state_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; * @return This builder for chaining. */ public Builder clearState() { bitField0_ = (bitField0_ & ~0x00000010); state_ = 2; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ErasureCodingPolicyProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ErasureCodingPolicyProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ErasureCodingPolicyProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static 
org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface AddErasureCodingPolicyResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.AddErasureCodingPolicyResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; * @return Whether the policy field is set. */ boolean hasPolicy(); /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; * @return The policy. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicy(); /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPolicyOrBuilder(); /** * required bool succeed = 2; * @return Whether the succeed field is set. */ boolean hasSucceed(); /** * required bool succeed = 2; * @return The succeed. */ boolean getSucceed(); /** * optional string errorMsg = 3; * @return Whether the errorMsg field is set. */ boolean hasErrorMsg(); /** * optional string errorMsg = 3; * @return The errorMsg. */ java.lang.String getErrorMsg(); /** * optional string errorMsg = 3; * @return The bytes for errorMsg. */ org.apache.hadoop.thirdparty.protobuf.ByteString getErrorMsgBytes(); } /** * Protobuf type {@code hadoop.hdfs.AddErasureCodingPolicyResponseProto} */ public static final class AddErasureCodingPolicyResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.AddErasureCodingPolicyResponseProto) AddErasureCodingPolicyResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use AddErasureCodingPolicyResponseProto.newBuilder() to construct. 
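  /*
   * Illustrative usage sketch (not part of the generated output): one possible way to
   * exercise the builders and parse methods defined in this file. The policy name,
   * id, and cell size values below are assumptions chosen for illustration; only
   * setters and parse overloads that appear in this file (or the standard protobuf
   * Message API, e.g. toByteArray) are used.
   *
   *   HdfsProtos.ErasureCodingPolicyProto policy =
   *       HdfsProtos.ErasureCodingPolicyProto.newBuilder()
   *           .setName("RS-6-3-1024k")          // optional string name = 1 (assumed value)
   *           .setCellSize(1024 * 1024)          // optional uint32 cellSize = 3 (assumed value)
   *           .setId(1)                          // required uint32 id = 4; only 8 bits are used
   *           .setState(HdfsProtos.ErasureCodingPolicyState.ENABLED)
   *           .build();                          // build() fails if the required id is unset
   *
   *   HdfsProtos.AddErasureCodingPolicyResponseProto response =
   *       HdfsProtos.AddErasureCodingPolicyResponseProto.newBuilder()
   *           .setPolicy(policy)                 // required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1
   *           .setSucceed(true)                  // required bool succeed = 2
   *           .build();
   *
   *   // Round-trip through the wire format using the parseFrom(byte[]) overload above.
   *   byte[] bytes = response.toByteArray();
   *   HdfsProtos.AddErasureCodingPolicyResponseProto parsed =
   *       HdfsProtos.AddErasureCodingPolicyResponseProto.parseFrom(bytes);
   */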
private AddErasureCodingPolicyResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private AddErasureCodingPolicyResponseProto() { errorMsg_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new AddErasureCodingPolicyResponseProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.Builder.class); } private int bitField0_; public static final int POLICY_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto policy_; /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; * @return Whether the policy field is set. */ @java.lang.Override public boolean hasPolicy() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; * @return The policy. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicy() { return policy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : policy_; } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPolicyOrBuilder() { return policy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : policy_; } public static final int SUCCEED_FIELD_NUMBER = 2; private boolean succeed_ = false; /** * required bool succeed = 2; * @return Whether the succeed field is set. */ @java.lang.Override public boolean hasSucceed() { return ((bitField0_ & 0x00000002) != 0); } /** * required bool succeed = 2; * @return The succeed. */ @java.lang.Override public boolean getSucceed() { return succeed_; } public static final int ERRORMSG_FIELD_NUMBER = 3; @SuppressWarnings("serial") private volatile java.lang.Object errorMsg_ = ""; /** * optional string errorMsg = 3; * @return Whether the errorMsg field is set. */ @java.lang.Override public boolean hasErrorMsg() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string errorMsg = 3; * @return The errorMsg. */ @java.lang.Override public java.lang.String getErrorMsg() { java.lang.Object ref = errorMsg_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { errorMsg_ = s; } return s; } } /** * optional string errorMsg = 3; * @return The bytes for errorMsg. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getErrorMsgBytes() { java.lang.Object ref = errorMsg_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); errorMsg_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasPolicy()) { memoizedIsInitialized = 0; return false; } if (!hasSucceed()) { memoizedIsInitialized = 0; return false; } if (!getPolicy().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getPolicy()); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBool(2, succeed_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, errorMsg_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getPolicy()); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(2, succeed_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, errorMsg_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto) obj; if (hasPolicy() != other.hasPolicy()) return false; if (hasPolicy()) { if (!getPolicy() .equals(other.getPolicy())) return false; } if (hasSucceed() != other.hasSucceed()) return false; if (hasSucceed()) { if (getSucceed() != other.getSucceed()) return false; } if (hasErrorMsg() != other.hasErrorMsg()) return false; if (hasErrorMsg()) { if (!getErrorMsg() .equals(other.getErrorMsg())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasPolicy()) { hash = (37 * hash) + POLICY_FIELD_NUMBER; hash = (53 * hash) + getPolicy().hashCode(); } if (hasSucceed()) { hash = (37 * hash) + SUCCEED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getSucceed()); } if (hasErrorMsg()) { hash = (37 * hash) + ERRORMSG_FIELD_NUMBER; hash = (53 * hash) + getErrorMsg().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AddErasureCodingPolicyResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.AddErasureCodingPolicyResponseProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getPolicyFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; policy_ = null; if (policyBuilder_ != null) { policyBuilder_.dispose(); policyBuilder_ = null; } succeed_ = false; errorMsg_ = ""; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto result = buildPartial(); if 
(!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.policy_ = policyBuilder_ == null ? policy_ : policyBuilder_.build(); to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.succeed_ = succeed_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.errorMsg_ = errorMsg_; to_bitField0_ |= 0x00000004; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.getDefaultInstance()) return this; if (other.hasPolicy()) { mergePolicy(other.getPolicy()); } if (other.hasSucceed()) { setSucceed(other.getSucceed()); } if (other.hasErrorMsg()) { errorMsg_ = other.errorMsg_; bitField0_ |= 0x00000004; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasPolicy()) { return false; } if (!hasSucceed()) { return false; } if (!getPolicy().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: 
done = true; break; case 10: { input.readMessage( getPolicyFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000001; break; } // case 10 case 16: { succeed_ = input.readBool(); bitField0_ |= 0x00000002; break; } // case 16 case 26: { errorMsg_ = input.readBytes(); bitField0_ |= 0x00000004; break; } // case 26 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto policy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> policyBuilder_; /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; * @return Whether the policy field is set. */ public boolean hasPolicy() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; * @return The policy. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicy() { if (policyBuilder_ == null) { return policy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : policy_; } else { return policyBuilder_.getMessage(); } } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public Builder setPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (policyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } policy_ = value; } else { policyBuilder_.setMessage(value); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public Builder setPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) { if (policyBuilder_ == null) { policy_ = builderForValue.build(); } else { policyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public Builder mergePolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (policyBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && policy_ != null && policy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) { getPolicyBuilder().mergeFrom(value); } else { policy_ = value; } } else { policyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public Builder clearPolicy() { bitField0_ = (bitField0_ & ~0x00000001); policy_ = null; if (policyBuilder_ != null) { policyBuilder_.dispose(); policyBuilder_ = null; } onChanged(); return this; } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getPolicyBuilder() { bitField0_ |= 0x00000001; onChanged(); return getPolicyFieldBuilder().getBuilder(); } /** * required 
.hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPolicyOrBuilder() { if (policyBuilder_ != null) { return policyBuilder_.getMessageOrBuilder(); } else { return policy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : policy_; } } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> getPolicyFieldBuilder() { if (policyBuilder_ == null) { policyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>( getPolicy(), getParentForChildren(), isClean()); policy_ = null; } return policyBuilder_; } private boolean succeed_ ; /** * required bool succeed = 2; * @return Whether the succeed field is set. */ @java.lang.Override public boolean hasSucceed() { return ((bitField0_ & 0x00000002) != 0); } /** * required bool succeed = 2; * @return The succeed. */ @java.lang.Override public boolean getSucceed() { return succeed_; } /** * required bool succeed = 2; * @param value The succeed to set. * @return This builder for chaining. */ public Builder setSucceed(boolean value) { succeed_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required bool succeed = 2; * @return This builder for chaining. */ public Builder clearSucceed() { bitField0_ = (bitField0_ & ~0x00000002); succeed_ = false; onChanged(); return this; } private java.lang.Object errorMsg_ = ""; /** * optional string errorMsg = 3; * @return Whether the errorMsg field is set. */ public boolean hasErrorMsg() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string errorMsg = 3; * @return The errorMsg. */ public java.lang.String getErrorMsg() { java.lang.Object ref = errorMsg_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { errorMsg_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string errorMsg = 3; * @return The bytes for errorMsg. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getErrorMsgBytes() { java.lang.Object ref = errorMsg_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); errorMsg_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string errorMsg = 3; * @param value The errorMsg to set. * @return This builder for chaining. */ public Builder setErrorMsg( java.lang.String value) { if (value == null) { throw new NullPointerException(); } errorMsg_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * optional string errorMsg = 3; * @return This builder for chaining. 
*/ public Builder clearErrorMsg() { errorMsg_ = getDefaultInstance().getErrorMsg(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } /** * optional string errorMsg = 3; * @param value The bytes for errorMsg to set. * @return This builder for chaining. */ public Builder setErrorMsgBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } errorMsg_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AddErasureCodingPolicyResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.AddErasureCodingPolicyResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public AddErasureCodingPolicyResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ECTopologyVerifierResultProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ECTopologyVerifierResultProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string resultMessage = 1; * @return Whether the resultMessage field is set. */ boolean hasResultMessage(); /** * required string resultMessage = 1; * @return The resultMessage. */ java.lang.String getResultMessage(); /** * required string resultMessage = 1; * @return The bytes for resultMessage. */ org.apache.hadoop.thirdparty.protobuf.ByteString getResultMessageBytes(); /** * required bool isSupported = 2; * @return Whether the isSupported field is set. 
*/ boolean hasIsSupported(); /** * required bool isSupported = 2; * @return The isSupported. */ boolean getIsSupported(); } /** * Protobuf type {@code hadoop.hdfs.ECTopologyVerifierResultProto} */ public static final class ECTopologyVerifierResultProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ECTopologyVerifierResultProto) ECTopologyVerifierResultProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ECTopologyVerifierResultProto.newBuilder() to construct. private ECTopologyVerifierResultProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ECTopologyVerifierResultProto() { resultMessage_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new ECTopologyVerifierResultProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.Builder.class); } private int bitField0_; public static final int RESULTMESSAGE_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object resultMessage_ = ""; /** * required string resultMessage = 1; * @return Whether the resultMessage field is set. */ @java.lang.Override public boolean hasResultMessage() { return ((bitField0_ & 0x00000001) != 0); } /** * required string resultMessage = 1; * @return The resultMessage. */ @java.lang.Override public java.lang.String getResultMessage() { java.lang.Object ref = resultMessage_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { resultMessage_ = s; } return s; } } /** * required string resultMessage = 1; * @return The bytes for resultMessage. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getResultMessageBytes() { java.lang.Object ref = resultMessage_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); resultMessage_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int ISSUPPORTED_FIELD_NUMBER = 2; private boolean isSupported_ = false; /** * required bool isSupported = 2; * @return Whether the isSupported field is set. */ @java.lang.Override public boolean hasIsSupported() { return ((bitField0_ & 0x00000002) != 0); } /** * required bool isSupported = 2; * @return The isSupported. 
*/ @java.lang.Override public boolean getIsSupported() { return isSupported_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasResultMessage()) { memoizedIsInitialized = 0; return false; } if (!hasIsSupported()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, resultMessage_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBool(2, isSupported_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, resultMessage_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(2, isSupported_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto) obj; if (hasResultMessage() != other.hasResultMessage()) return false; if (hasResultMessage()) { if (!getResultMessage() .equals(other.getResultMessage())) return false; } if (hasIsSupported() != other.hasIsSupported()) return false; if (hasIsSupported()) { if (getIsSupported() != other.getIsSupported()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasResultMessage()) { hash = (37 * hash) + RESULTMESSAGE_FIELD_NUMBER; hash = (53 * hash) + getResultMessage().hashCode(); } if (hasIsSupported()) { hash = (37 * hash) + ISSUPPORTED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getIsSupported()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ECTopologyVerifierResultProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ECTopologyVerifierResultProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; resultMessage_ = ""; isSupported_ = false; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.resultMessage_ = resultMessage_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.isSupported_ = isSupported_; to_bitField0_ |= 0x00000002; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return 
super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.getDefaultInstance()) return this; if (other.hasResultMessage()) { resultMessage_ = other.resultMessage_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasIsSupported()) { setIsSupported(other.getIsSupported()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasResultMessage()) { return false; } if (!hasIsSupported()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { resultMessage_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 16: { isSupported_ = input.readBool(); bitField0_ |= 0x00000002; break; } // case 16 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object resultMessage_ = ""; /** * required string resultMessage = 1; * @return Whether the resultMessage field is set. */ public boolean hasResultMessage() { return ((bitField0_ & 0x00000001) != 0); } /** * required string resultMessage = 1; * @return The resultMessage. */ public java.lang.String getResultMessage() { java.lang.Object ref = resultMessage_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { resultMessage_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string resultMessage = 1; * @return The bytes for resultMessage. 
*/ public org.apache.hadoop.thirdparty.protobuf.ByteString getResultMessageBytes() { java.lang.Object ref = resultMessage_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); resultMessage_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string resultMessage = 1; * @param value The resultMessage to set. * @return This builder for chaining. */ public Builder setResultMessage( java.lang.String value) { if (value == null) { throw new NullPointerException(); } resultMessage_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required string resultMessage = 1; * @return This builder for chaining. */ public Builder clearResultMessage() { resultMessage_ = getDefaultInstance().getResultMessage(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * required string resultMessage = 1; * @param value The bytes for resultMessage to set. * @return This builder for chaining. */ public Builder setResultMessageBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } resultMessage_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private boolean isSupported_ ; /** * required bool isSupported = 2; * @return Whether the isSupported field is set. */ @java.lang.Override public boolean hasIsSupported() { return ((bitField0_ & 0x00000002) != 0); } /** * required bool isSupported = 2; * @return The isSupported. */ @java.lang.Override public boolean getIsSupported() { return isSupported_; } /** * required bool isSupported = 2; * @param value The isSupported to set. * @return This builder for chaining. */ public Builder setIsSupported(boolean value) { isSupported_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required bool isSupported = 2; * @return This builder for chaining. 
*/ public Builder clearIsSupported() { bitField0_ = (bitField0_ & ~0x00000002); isSupported_ = false; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ECTopologyVerifierResultProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ECTopologyVerifierResultProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ECTopologyVerifierResultProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface HdfsPathHandleProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.HdfsPathHandleProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint64 inodeId = 1; * @return Whether the inodeId field is set. */ boolean hasInodeId(); /** * optional uint64 inodeId = 1; * @return The inodeId. */ long getInodeId(); /** * optional uint64 mtime = 2; * @return Whether the mtime field is set. */ boolean hasMtime(); /** * optional uint64 mtime = 2; * @return The mtime. */ long getMtime(); /** * optional string path = 3; * @return Whether the path field is set. */ boolean hasPath(); /** * optional string path = 3; * @return The path. */ java.lang.String getPath(); /** * optional string path = 3; * @return The bytes for path. */ org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes(); } /** *
   **
   * Placeholder type for consistent HDFS operations.
   * 
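   * Example (illustrative only: the inode id, mtime and path values are
   * hypothetical, while the builder and parse methods are the ones generated
   * in this class):
   *
   *   HdfsProtos.HdfsPathHandleProto handle =
   *       HdfsProtos.HdfsPathHandleProto.newBuilder()
   *           .setInodeId(16385L)             // hypothetical inode id
   *           .setMtime(1700000000000L)       // hypothetical modification time, ms
   *           .setPath("/user/example/data")  // hypothetical path
   *           .build();
   *   byte[] wire = handle.toByteArray();
   *   HdfsProtos.HdfsPathHandleProto copy =
   *       HdfsProtos.HdfsPathHandleProto.parseFrom(wire);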
* * Protobuf type {@code hadoop.hdfs.HdfsPathHandleProto} */ public static final class HdfsPathHandleProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.HdfsPathHandleProto) HdfsPathHandleProtoOrBuilder { private static final long serialVersionUID = 0L; // Use HdfsPathHandleProto.newBuilder() to construct. private HdfsPathHandleProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private HdfsPathHandleProto() { path_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new HdfsPathHandleProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.Builder.class); } private int bitField0_; public static final int INODEID_FIELD_NUMBER = 1; private long inodeId_ = 0L; /** * optional uint64 inodeId = 1; * @return Whether the inodeId field is set. */ @java.lang.Override public boolean hasInodeId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 inodeId = 1; * @return The inodeId. */ @java.lang.Override public long getInodeId() { return inodeId_; } public static final int MTIME_FIELD_NUMBER = 2; private long mtime_ = 0L; /** * optional uint64 mtime = 2; * @return Whether the mtime field is set. */ @java.lang.Override public boolean hasMtime() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 mtime = 2; * @return The mtime. */ @java.lang.Override public long getMtime() { return mtime_; } public static final int PATH_FIELD_NUMBER = 3; @SuppressWarnings("serial") private volatile java.lang.Object path_ = ""; /** * optional string path = 3; * @return Whether the path field is set. */ @java.lang.Override public boolean hasPath() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string path = 3; * @return The path. */ @java.lang.Override public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * optional string path = 3; * @return The bytes for path. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, inodeId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, mtime_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, path_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, inodeId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, mtime_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, path_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto) obj; if (hasInodeId() != other.hasInodeId()) return false; if (hasInodeId()) { if (getInodeId() != other.getInodeId()) return false; } if (hasMtime() != other.hasMtime()) return false; if (hasMtime()) { if (getMtime() != other.getMtime()) return false; } if (hasPath() != other.hasPath()) return false; if (hasPath()) { if (!getPath() .equals(other.getPath())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasInodeId()) { hash = (37 * hash) + INODEID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getInodeId()); } if (hasMtime()) { hash = (37 * hash) + MTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getMtime()); } if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto 
parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Placeholder type for consistent HDFS operations.
     * 
* * Protobuf type {@code hadoop.hdfs.HdfsPathHandleProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.HdfsPathHandleProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; inodeId_ = 0L; mtime_ = 0L; path_ = ""; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.inodeId_ = inodeId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.mtime_ = mtime_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.path_ = path_; to_bitField0_ |= 0x00000004; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } 
@java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.getDefaultInstance()) return this; if (other.hasInodeId()) { setInodeId(other.getInodeId()); } if (other.hasMtime()) { setMtime(other.getMtime()); } if (other.hasPath()) { path_ = other.path_; bitField0_ |= 0x00000004; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { inodeId_ = input.readUInt64(); bitField0_ |= 0x00000001; break; } // case 8 case 16: { mtime_ = input.readUInt64(); bitField0_ |= 0x00000002; break; } // case 16 case 26: { path_ = input.readBytes(); bitField0_ |= 0x00000004; break; } // case 26 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private long inodeId_ ; /** * optional uint64 inodeId = 1; * @return Whether the inodeId field is set. */ @java.lang.Override public boolean hasInodeId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 inodeId = 1; * @return The inodeId. */ @java.lang.Override public long getInodeId() { return inodeId_; } /** * optional uint64 inodeId = 1; * @param value The inodeId to set. * @return This builder for chaining. */ public Builder setInodeId(long value) { inodeId_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * optional uint64 inodeId = 1; * @return This builder for chaining. */ public Builder clearInodeId() { bitField0_ = (bitField0_ & ~0x00000001); inodeId_ = 0L; onChanged(); return this; } private long mtime_ ; /** * optional uint64 mtime = 2; * @return Whether the mtime field is set. */ @java.lang.Override public boolean hasMtime() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 mtime = 2; * @return The mtime. */ @java.lang.Override public long getMtime() { return mtime_; } /** * optional uint64 mtime = 2; * @param value The mtime to set. * @return This builder for chaining. 
*/ public Builder setMtime(long value) { mtime_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * optional uint64 mtime = 2; * @return This builder for chaining. */ public Builder clearMtime() { bitField0_ = (bitField0_ & ~0x00000002); mtime_ = 0L; onChanged(); return this; } private java.lang.Object path_ = ""; /** * optional string path = 3; * @return Whether the path field is set. */ public boolean hasPath() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string path = 3; * @return The path. */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string path = 3; * @return The bytes for path. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string path = 3; * @param value The path to set. * @return This builder for chaining. */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } path_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * optional string path = 3; * @return This builder for chaining. */ public Builder clearPath() { path_ = getDefaultInstance().getPath(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } /** * optional string path = 3; * @param value The bytes for path to set. * @return This builder for chaining. 
*/ public Builder setPathBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } path_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HdfsPathHandleProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.HdfsPathHandleProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public HdfsPathHandleProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface HdfsFileStatusProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.HdfsFileStatusProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; * @return Whether the fileType field is set. */ boolean hasFileType(); /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; * @return The fileType. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType(); /** *
      * local name of inode, encoded in Java UTF-8
     * 
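      * Example (illustrative; {@code status} stands for a hypothetical
      * HdfsFileStatusProto instance): the name is carried as raw UTF-8 bytes,
      * so it is usually decoded as
      *   String name = status.getPath().toStringUtf8();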
* * required bytes path = 2; * @return Whether the path field is set. */ boolean hasPath(); /** *
      * local name of inode, encoded in Java UTF-8
     * 
* * required bytes path = 2; * @return The path. */ org.apache.hadoop.thirdparty.protobuf.ByteString getPath(); /** * required uint64 length = 3; * @return Whether the length field is set. */ boolean hasLength(); /** * required uint64 length = 3; * @return The length. */ long getLength(); /** * required .hadoop.hdfs.FsPermissionProto permission = 4; * @return Whether the permission field is set. */ boolean hasPermission(); /** * required .hadoop.hdfs.FsPermissionProto permission = 4; * @return The permission. */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission(); /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder(); /** * required string owner = 5; * @return Whether the owner field is set. */ boolean hasOwner(); /** * required string owner = 5; * @return The owner. */ java.lang.String getOwner(); /** * required string owner = 5; * @return The bytes for owner. */ org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes(); /** * required string group = 6; * @return Whether the group field is set. */ boolean hasGroup(); /** * required string group = 6; * @return The group. */ java.lang.String getGroup(); /** * required string group = 6; * @return The bytes for group. */ org.apache.hadoop.thirdparty.protobuf.ByteString getGroupBytes(); /** * required uint64 modification_time = 7; * @return Whether the modificationTime field is set. */ boolean hasModificationTime(); /** * required uint64 modification_time = 7; * @return The modificationTime. */ long getModificationTime(); /** * required uint64 access_time = 8; * @return Whether the accessTime field is set. */ boolean hasAccessTime(); /** * required uint64 access_time = 8; * @return The accessTime. */ long getAccessTime(); /** *
     * Optional fields for symlink
     * 
* * optional bytes symlink = 9; * @return Whether the symlink field is set. */ boolean hasSymlink(); /** *
     * Optional fields for symlink
     * 
* * optional bytes symlink = 9; * @return The symlink. */ org.apache.hadoop.thirdparty.protobuf.ByteString getSymlink(); /** *
     * Optional fields for file
     * 
* * optional uint32 block_replication = 10 [default = 0]; * @return Whether the blockReplication field is set. */ boolean hasBlockReplication(); /** *
     * Optional fields for file
     * 
* * optional uint32 block_replication = 10 [default = 0]; * @return The blockReplication. */ int getBlockReplication(); /** * optional uint64 blocksize = 11 [default = 0]; * @return Whether the blocksize field is set. */ boolean hasBlocksize(); /** * optional uint64 blocksize = 11 [default = 0]; * @return The blocksize. */ long getBlocksize(); /** *
      * supplied only if asked by the client
     * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * @return Whether the locations field is set. */ boolean hasLocations(); /** *
      * supplied only if asked by the client
     * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * @return The locations. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations(); /** *
      * supplied only if asked by the client
     * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder(); /** *
     * Optional field for fileId
     * 
* * optional uint64 fileId = 13 [default = 0]; * @return Whether the fileId field is set. */ boolean hasFileId(); /** *
     * Optional field for fileId
     * 
* * optional uint64 fileId = 13 [default = 0]; * @return The fileId. */ long getFileId(); /** * optional int32 childrenNum = 14 [default = -1]; * @return Whether the childrenNum field is set. */ boolean hasChildrenNum(); /** * optional int32 childrenNum = 14 [default = -1]; * @return The childrenNum. */ int getChildrenNum(); /** *
     * Optional field for file encryption
     * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * @return Whether the fileEncryptionInfo field is set. */ boolean hasFileEncryptionInfo(); /** *
     * Optional field for file encryption
     * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * @return The fileEncryptionInfo. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo(); /** *
     * Optional field for file encryption
     * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder(); /** *
     * block storage policy id
     * 
* * optional uint32 storagePolicy = 16 [default = 0]; * @return Whether the storagePolicy field is set. */ boolean hasStoragePolicy(); /** *
     * block storage policy id
     * 
* * optional uint32 storagePolicy = 16 [default = 0]; * @return The storagePolicy. */ int getStoragePolicy(); /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; * @return Whether the ecPolicy field is set. */ boolean hasEcPolicy(); /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; * @return The ecPolicy. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy(); /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder(); /** *
     * Set of flags
     * 
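      * Example (illustrative; {@code status} stands for a hypothetical
      * HdfsFileStatusProto instance): the enum numbers in
      * {@code HdfsFileStatusProto.Flags} are powers of two, so the field is
      * read as a bit mask, e.g.
      *   boolean erasureCoded =
      *       (status.getFlags() & HdfsFileStatusProto.Flags.HAS_EC_VALUE) != 0;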
* * optional uint32 flags = 18 [default = 0]; * @return Whether the flags field is set. */ boolean hasFlags(); /** *
     * Set of flags
     * 
* * optional uint32 flags = 18 [default = 0]; * @return The flags. */ int getFlags(); /** * optional string namespace = 19; * @return Whether the namespace field is set. */ boolean hasNamespace(); /** * optional string namespace = 19; * @return The namespace. */ java.lang.String getNamespace(); /** * optional string namespace = 19; * @return The bytes for namespace. */ org.apache.hadoop.thirdparty.protobuf.ByteString getNamespaceBytes(); } /** *
   **
   * Status of a file, directory, or symlink.
   * Optionally includes the file's block locations if requested by the client on the RPC call.
   * 
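   * Example (illustrative; {@code status} stands for an HdfsFileStatusProto
   * obtained elsewhere, e.g. via {@code parseFrom(byte[])}):
   *
   *   if (status.getFileType() == HdfsProtos.HdfsFileStatusProto.FileType.IS_FILE) {
   *     long length = status.getLength();
   *     String owner = status.getOwner();
   *     if (status.hasLocations()) {
   *       // block locations were requested on the RPC call and are present
   *       HdfsProtos.LocatedBlocksProto locations = status.getLocations();
   *     }
   *   }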
* * Protobuf type {@code hadoop.hdfs.HdfsFileStatusProto} */ public static final class HdfsFileStatusProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.HdfsFileStatusProto) HdfsFileStatusProtoOrBuilder { private static final long serialVersionUID = 0L; // Use HdfsFileStatusProto.newBuilder() to construct. private HdfsFileStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private HdfsFileStatusProto() { fileType_ = 1; path_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; owner_ = ""; group_ = ""; symlink_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; childrenNum_ = -1; namespace_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new HdfsFileStatusProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class); } /** * Protobuf enum {@code hadoop.hdfs.HdfsFileStatusProto.FileType} */ public enum FileType implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * IS_DIR = 1; */ IS_DIR(1), /** * IS_FILE = 2; */ IS_FILE(2), /** * IS_SYMLINK = 3; */ IS_SYMLINK(3), ; /** * IS_DIR = 1; */ public static final int IS_DIR_VALUE = 1; /** * IS_FILE = 2; */ public static final int IS_FILE_VALUE = 2; /** * IS_SYMLINK = 3; */ public static final int IS_SYMLINK_VALUE = 3; public final int getNumber() { return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static FileType valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. 
*/ public static FileType forNumber(int value) { switch (value) { case 1: return IS_DIR; case 2: return IS_FILE; case 3: return IS_SYMLINK; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< FileType> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public FileType findValueByNumber(int number) { return FileType.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor().getEnumTypes().get(0); } private static final FileType[] VALUES = values(); public static FileType valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private FileType(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.HdfsFileStatusProto.FileType) } /** * Protobuf enum {@code hadoop.hdfs.HdfsFileStatusProto.Flags} */ public enum Flags implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** *
       * has ACLs
       * 
* * HAS_ACL = 1; */ HAS_ACL(1), /** *
       * encrypted
       * 
* * HAS_CRYPT = 2; */ HAS_CRYPT(2), /** *
       * erasure coded
       * 
* * HAS_EC = 4; */ HAS_EC(4), /** *
        * snapshot enabled
       * 
* * SNAPSHOT_ENABLED = 8; */ SNAPSHOT_ENABLED(8), ; /** *
       * has ACLs
       * 
* * HAS_ACL = 1; */ public static final int HAS_ACL_VALUE = 1; /** *
       * encrypted
       * 
* * HAS_CRYPT = 2; */ public static final int HAS_CRYPT_VALUE = 2; /** *
       * erasure coded
       * 
* * HAS_EC = 4; */ public static final int HAS_EC_VALUE = 4; /** *
       * snapshot enabled
       * 
* * SNAPSHOT_ENABLED = 8; */ public static final int SNAPSHOT_ENABLED_VALUE = 8; public final int getNumber() { return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static Flags valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. */ public static Flags forNumber(int value) { switch (value) { case 1: return HAS_ACL; case 2: return HAS_CRYPT; case 4: return HAS_EC; case 8: return SNAPSHOT_ENABLED; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< Flags> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public Flags findValueByNumber(int number) { return Flags.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor().getEnumTypes().get(1); } private static final Flags[] VALUES = values(); public static Flags valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private Flags(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.HdfsFileStatusProto.Flags) } private int bitField0_; public static final int FILETYPE_FIELD_NUMBER = 1; private int fileType_ = 1; /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; * @return Whether the fileType field is set. */ @java.lang.Override public boolean hasFileType() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; * @return The fileType. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.forNumber(fileType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR : result; } public static final int PATH_FIELD_NUMBER = 2; private org.apache.hadoop.thirdparty.protobuf.ByteString path_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** *
      * local name of the inode, encoded in Java UTF-8
     * 
* * required bytes path = 2; * @return Whether the path field is set. */ @java.lang.Override public boolean hasPath() { return ((bitField0_ & 0x00000002) != 0); } /** *
      * local name of the inode, encoded in Java UTF-8
     * 
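      * Editorial note (assumption): the raw bytes are typically decoded with
      * {@code getPath().toStringUtf8()} or
      * {@code new String(getPath().toByteArray(), java.nio.charset.StandardCharsets.UTF_8)}.
      * 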
* * required bytes path = 2; * @return The path. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getPath() { return path_; } public static final int LENGTH_FIELD_NUMBER = 3; private long length_ = 0L; /** * required uint64 length = 3; * @return Whether the length field is set. */ @java.lang.Override public boolean hasLength() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 length = 3; * @return The length. */ @java.lang.Override public long getLength() { return length_; } public static final int PERMISSION_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_; /** * required .hadoop.hdfs.FsPermissionProto permission = 4; * @return Whether the permission field is set. */ @java.lang.Override public boolean hasPermission() { return ((bitField0_ & 0x00000008) != 0); } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; * @return The permission. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() { return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } public static final int OWNER_FIELD_NUMBER = 5; @SuppressWarnings("serial") private volatile java.lang.Object owner_ = ""; /** * required string owner = 5; * @return Whether the owner field is set. */ @java.lang.Override public boolean hasOwner() { return ((bitField0_ & 0x00000010) != 0); } /** * required string owner = 5; * @return The owner. */ @java.lang.Override public java.lang.String getOwner() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { owner_ = s; } return s; } } /** * required string owner = 5; * @return The bytes for owner. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int GROUP_FIELD_NUMBER = 6; @SuppressWarnings("serial") private volatile java.lang.Object group_ = ""; /** * required string group = 6; * @return Whether the group field is set. */ @java.lang.Override public boolean hasGroup() { return ((bitField0_ & 0x00000020) != 0); } /** * required string group = 6; * @return The group. */ @java.lang.Override public java.lang.String getGroup() { java.lang.Object ref = group_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { group_ = s; } return s; } } /** * required string group = 6; * @return The bytes for group. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getGroupBytes() { java.lang.Object ref = group_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); group_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int MODIFICATION_TIME_FIELD_NUMBER = 7; private long modificationTime_ = 0L; /** * required uint64 modification_time = 7; * @return Whether the modificationTime field is set. */ @java.lang.Override public boolean hasModificationTime() { return ((bitField0_ & 0x00000040) != 0); } /** * required uint64 modification_time = 7; * @return The modificationTime. */ @java.lang.Override public long getModificationTime() { return modificationTime_; } public static final int ACCESS_TIME_FIELD_NUMBER = 8; private long accessTime_ = 0L; /** * required uint64 access_time = 8; * @return Whether the accessTime field is set. */ @java.lang.Override public boolean hasAccessTime() { return ((bitField0_ & 0x00000080) != 0); } /** * required uint64 access_time = 8; * @return The accessTime. */ @java.lang.Override public long getAccessTime() { return accessTime_; } public static final int SYMLINK_FIELD_NUMBER = 9; private org.apache.hadoop.thirdparty.protobuf.ByteString symlink_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** *
      * Optional field for symlink
     * 
* * optional bytes symlink = 9; * @return Whether the symlink field is set. */ @java.lang.Override public boolean hasSymlink() { return ((bitField0_ & 0x00000100) != 0); } /** *
      * Optional field for symlink
     * 
* * optional bytes symlink = 9; * @return The symlink. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getSymlink() { return symlink_; } public static final int BLOCK_REPLICATION_FIELD_NUMBER = 10; private int blockReplication_ = 0; /** *
     * Optional fields for file
     * 
* * optional uint32 block_replication = 10 [default = 0]; * @return Whether the blockReplication field is set. */ @java.lang.Override public boolean hasBlockReplication() { return ((bitField0_ & 0x00000200) != 0); } /** *
     * Optional fields for file
     * 
* * optional uint32 block_replication = 10 [default = 0]; * @return The blockReplication. */ @java.lang.Override public int getBlockReplication() { return blockReplication_; } public static final int BLOCKSIZE_FIELD_NUMBER = 11; private long blocksize_ = 0L; /** * optional uint64 blocksize = 11 [default = 0]; * @return Whether the blocksize field is set. */ @java.lang.Override public boolean hasBlocksize() { return ((bitField0_ & 0x00000400) != 0); } /** * optional uint64 blocksize = 11 [default = 0]; * @return The blocksize. */ @java.lang.Override public long getBlocksize() { return blocksize_; } public static final int LOCATIONS_FIELD_NUMBER = 12; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_; /** *
      * supplied only if requested by the client
     * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * @return Whether the locations field is set. */ @java.lang.Override public boolean hasLocations() { return ((bitField0_ & 0x00000800) != 0); } /** *
      * supplied only if requested by the client
     * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * @return The locations. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() { return locations_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance() : locations_; } /** *
      * supplied only if requested by the client
     * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() { return locations_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance() : locations_; } public static final int FILEID_FIELD_NUMBER = 13; private long fileId_ = 0L; /** *
     * Optional field for fileId
     * 
* * optional uint64 fileId = 13 [default = 0]; * @return Whether the fileId field is set. */ @java.lang.Override public boolean hasFileId() { return ((bitField0_ & 0x00001000) != 0); } /** *
     * Optional field for fileId
     * 
* * optional uint64 fileId = 13 [default = 0]; * @return The fileId. */ @java.lang.Override public long getFileId() { return fileId_; } public static final int CHILDRENNUM_FIELD_NUMBER = 14; private int childrenNum_ = -1; /** * optional int32 childrenNum = 14 [default = -1]; * @return Whether the childrenNum field is set. */ @java.lang.Override public boolean hasChildrenNum() { return ((bitField0_ & 0x00002000) != 0); } /** * optional int32 childrenNum = 14 [default = -1]; * @return The childrenNum. */ @java.lang.Override public int getChildrenNum() { return childrenNum_; } public static final int FILEENCRYPTIONINFO_FIELD_NUMBER = 15; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_; /** *
     * Optional field for file encryption
     * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * @return Whether the fileEncryptionInfo field is set. */ @java.lang.Override public boolean hasFileEncryptionInfo() { return ((bitField0_ & 0x00004000) != 0); } /** *
     * Optional field for file encryption
     * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * @return The fileEncryptionInfo. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() { return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } /** *
     * Optional field for file encryption
     * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() { return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } public static final int STORAGEPOLICY_FIELD_NUMBER = 16; private int storagePolicy_ = 0; /** *
     * block storage policy id
     * 
* * optional uint32 storagePolicy = 16 [default = 0]; * @return Whether the storagePolicy field is set. */ @java.lang.Override public boolean hasStoragePolicy() { return ((bitField0_ & 0x00008000) != 0); } /** *
     * block storage policy id
     * 
* * optional uint32 storagePolicy = 16 [default = 0]; * @return The storagePolicy. */ @java.lang.Override public int getStoragePolicy() { return storagePolicy_; } public static final int ECPOLICY_FIELD_NUMBER = 17; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_; /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; * @return Whether the ecPolicy field is set. */ @java.lang.Override public boolean hasEcPolicy() { return ((bitField0_ & 0x00010000) != 0); } /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; * @return The ecPolicy. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } public static final int FLAGS_FIELD_NUMBER = 18; private int flags_ = 0; /** *
     * Set of flags
     * 
* * optional uint32 flags = 18 [default = 0]; * @return Whether the flags field is set. */ @java.lang.Override public boolean hasFlags() { return ((bitField0_ & 0x00020000) != 0); } /** *
     * Set of flags
     * 
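      * Editorial note: the value is a bitwise OR of
      * {@code HdfsFileStatusProto.Flags} numbers (HAS_ACL, HAS_CRYPT, HAS_EC,
      * SNAPSHOT_ENABLED); individual bits can be tested with
      * {@code (getFlags() & Flags.HAS_ACL_VALUE) != 0}.
      * 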
* * optional uint32 flags = 18 [default = 0]; * @return The flags. */ @java.lang.Override public int getFlags() { return flags_; } public static final int NAMESPACE_FIELD_NUMBER = 19; @SuppressWarnings("serial") private volatile java.lang.Object namespace_ = ""; /** * optional string namespace = 19; * @return Whether the namespace field is set. */ @java.lang.Override public boolean hasNamespace() { return ((bitField0_ & 0x00040000) != 0); } /** * optional string namespace = 19; * @return The namespace. */ @java.lang.Override public java.lang.String getNamespace() { java.lang.Object ref = namespace_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { namespace_ = s; } return s; } } /** * optional string namespace = 19; * @return The bytes for namespace. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getNamespaceBytes() { java.lang.Object ref = namespace_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); namespace_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasFileType()) { memoizedIsInitialized = 0; return false; } if (!hasPath()) { memoizedIsInitialized = 0; return false; } if (!hasLength()) { memoizedIsInitialized = 0; return false; } if (!hasPermission()) { memoizedIsInitialized = 0; return false; } if (!hasOwner()) { memoizedIsInitialized = 0; return false; } if (!hasGroup()) { memoizedIsInitialized = 0; return false; } if (!hasModificationTime()) { memoizedIsInitialized = 0; return false; } if (!hasAccessTime()) { memoizedIsInitialized = 0; return false; } if (!getPermission().isInitialized()) { memoizedIsInitialized = 0; return false; } if (hasLocations()) { if (!getLocations().isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasEcPolicy()) { if (!getEcPolicy().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, fileType_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBytes(2, path_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, length_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeMessage(4, getPermission()); } if (((bitField0_ & 0x00000010) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, owner_); } if (((bitField0_ & 0x00000020) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, group_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeUInt64(7, modificationTime_); } if (((bitField0_ & 0x00000080) != 0)) { output.writeUInt64(8, accessTime_); } if (((bitField0_ & 0x00000100) != 0)) { output.writeBytes(9, symlink_); } if (((bitField0_ & 0x00000200) != 0)) { 
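        // Editorial note: each field's presence is tracked by a bit in bitField0_;
        // here bit 0x00000200 guards optional field 10 (block_replication), so the
        // value is written only when it was explicitly set.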
output.writeUInt32(10, blockReplication_); } if (((bitField0_ & 0x00000400) != 0)) { output.writeUInt64(11, blocksize_); } if (((bitField0_ & 0x00000800) != 0)) { output.writeMessage(12, getLocations()); } if (((bitField0_ & 0x00001000) != 0)) { output.writeUInt64(13, fileId_); } if (((bitField0_ & 0x00002000) != 0)) { output.writeInt32(14, childrenNum_); } if (((bitField0_ & 0x00004000) != 0)) { output.writeMessage(15, getFileEncryptionInfo()); } if (((bitField0_ & 0x00008000) != 0)) { output.writeUInt32(16, storagePolicy_); } if (((bitField0_ & 0x00010000) != 0)) { output.writeMessage(17, getEcPolicy()); } if (((bitField0_ & 0x00020000) != 0)) { output.writeUInt32(18, flags_); } if (((bitField0_ & 0x00040000) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 19, namespace_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, fileType_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(2, path_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, length_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, getPermission()); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, owner_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, group_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(7, modificationTime_); } if (((bitField0_ & 0x00000080) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(8, accessTime_); } if (((bitField0_ & 0x00000100) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(9, symlink_); } if (((bitField0_ & 0x00000200) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(10, blockReplication_); } if (((bitField0_ & 0x00000400) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(11, blocksize_); } if (((bitField0_ & 0x00000800) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(12, getLocations()); } if (((bitField0_ & 0x00001000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(13, fileId_); } if (((bitField0_ & 0x00002000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt32Size(14, childrenNum_); } if (((bitField0_ & 0x00004000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(15, getFileEncryptionInfo()); } if (((bitField0_ & 0x00008000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(16, storagePolicy_); } if (((bitField0_ & 0x00010000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(17, getEcPolicy()); } if (((bitField0_ & 0x00020000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(18, flags_); } if 
(((bitField0_ & 0x00040000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(19, namespace_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) obj; if (hasFileType() != other.hasFileType()) return false; if (hasFileType()) { if (fileType_ != other.fileType_) return false; } if (hasPath() != other.hasPath()) return false; if (hasPath()) { if (!getPath() .equals(other.getPath())) return false; } if (hasLength() != other.hasLength()) return false; if (hasLength()) { if (getLength() != other.getLength()) return false; } if (hasPermission() != other.hasPermission()) return false; if (hasPermission()) { if (!getPermission() .equals(other.getPermission())) return false; } if (hasOwner() != other.hasOwner()) return false; if (hasOwner()) { if (!getOwner() .equals(other.getOwner())) return false; } if (hasGroup() != other.hasGroup()) return false; if (hasGroup()) { if (!getGroup() .equals(other.getGroup())) return false; } if (hasModificationTime() != other.hasModificationTime()) return false; if (hasModificationTime()) { if (getModificationTime() != other.getModificationTime()) return false; } if (hasAccessTime() != other.hasAccessTime()) return false; if (hasAccessTime()) { if (getAccessTime() != other.getAccessTime()) return false; } if (hasSymlink() != other.hasSymlink()) return false; if (hasSymlink()) { if (!getSymlink() .equals(other.getSymlink())) return false; } if (hasBlockReplication() != other.hasBlockReplication()) return false; if (hasBlockReplication()) { if (getBlockReplication() != other.getBlockReplication()) return false; } if (hasBlocksize() != other.hasBlocksize()) return false; if (hasBlocksize()) { if (getBlocksize() != other.getBlocksize()) return false; } if (hasLocations() != other.hasLocations()) return false; if (hasLocations()) { if (!getLocations() .equals(other.getLocations())) return false; } if (hasFileId() != other.hasFileId()) return false; if (hasFileId()) { if (getFileId() != other.getFileId()) return false; } if (hasChildrenNum() != other.hasChildrenNum()) return false; if (hasChildrenNum()) { if (getChildrenNum() != other.getChildrenNum()) return false; } if (hasFileEncryptionInfo() != other.hasFileEncryptionInfo()) return false; if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo() .equals(other.getFileEncryptionInfo())) return false; } if (hasStoragePolicy() != other.hasStoragePolicy()) return false; if (hasStoragePolicy()) { if (getStoragePolicy() != other.getStoragePolicy()) return false; } if (hasEcPolicy() != other.hasEcPolicy()) return false; if (hasEcPolicy()) { if (!getEcPolicy() .equals(other.getEcPolicy())) return false; } if (hasFlags() != other.hasFlags()) return false; if (hasFlags()) { if (getFlags() != other.getFlags()) return false; } if (hasNamespace() != other.hasNamespace()) return false; if (hasNamespace()) { if (!getNamespace() .equals(other.getNamespace())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * 
hash) + getDescriptor().hashCode(); if (hasFileType()) { hash = (37 * hash) + FILETYPE_FIELD_NUMBER; hash = (53 * hash) + fileType_; } if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } if (hasLength()) { hash = (37 * hash) + LENGTH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLength()); } if (hasPermission()) { hash = (37 * hash) + PERMISSION_FIELD_NUMBER; hash = (53 * hash) + getPermission().hashCode(); } if (hasOwner()) { hash = (37 * hash) + OWNER_FIELD_NUMBER; hash = (53 * hash) + getOwner().hashCode(); } if (hasGroup()) { hash = (37 * hash) + GROUP_FIELD_NUMBER; hash = (53 * hash) + getGroup().hashCode(); } if (hasModificationTime()) { hash = (37 * hash) + MODIFICATION_TIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getModificationTime()); } if (hasAccessTime()) { hash = (37 * hash) + ACCESS_TIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getAccessTime()); } if (hasSymlink()) { hash = (37 * hash) + SYMLINK_FIELD_NUMBER; hash = (53 * hash) + getSymlink().hashCode(); } if (hasBlockReplication()) { hash = (37 * hash) + BLOCK_REPLICATION_FIELD_NUMBER; hash = (53 * hash) + getBlockReplication(); } if (hasBlocksize()) { hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getBlocksize()); } if (hasLocations()) { hash = (37 * hash) + LOCATIONS_FIELD_NUMBER; hash = (53 * hash) + getLocations().hashCode(); } if (hasFileId()) { hash = (37 * hash) + FILEID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFileId()); } if (hasChildrenNum()) { hash = (37 * hash) + CHILDRENNUM_FIELD_NUMBER; hash = (53 * hash) + getChildrenNum(); } if (hasFileEncryptionInfo()) { hash = (37 * hash) + FILEENCRYPTIONINFO_FIELD_NUMBER; hash = (53 * hash) + getFileEncryptionInfo().hashCode(); } if (hasStoragePolicy()) { hash = (37 * hash) + STORAGEPOLICY_FIELD_NUMBER; hash = (53 * hash) + getStoragePolicy(); } if (hasEcPolicy()) { hash = (37 * hash) + ECPOLICY_FIELD_NUMBER; hash = (53 * hash) + getEcPolicy().hashCode(); } if (hasFlags()) { hash = (37 * hash) + FLAGS_FIELD_NUMBER; hash = (53 * hash) + getFlags(); } if (hasNamespace()) { hash = (37 * hash) + NAMESPACE_FIELD_NUMBER; hash = (53 * hash) + getNamespace().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
      * Status of a file, directory, or symlink.
      * Optionally includes the file's block locations if the client requested them on the RPC call.
     * 
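      * A minimal usage sketch (editorial addition; values are illustrative,
      * {@code perm} stands for an FsPermissionProto built elsewhere, and
      * exception handling for parseFrom is omitted):
      *
      *   HdfsFileStatusProto status = HdfsFileStatusProto.newBuilder()
      *       .setFileType(HdfsFileStatusProto.FileType.IS_FILE)
      *       .setPath(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("part-00000"))
      *       .setLength(1024L)
      *       .setPermission(perm)
      *       .setOwner("hdfs")
      *       .setGroup("supergroup")
      *       .setModificationTime(1700000000000L)
      *       .setAccessTime(1700000000000L)
      *       .build();
      *   byte[] wire = status.toByteArray();
      *   HdfsFileStatusProto parsed = HdfsFileStatusProto.parseFrom(wire);
      *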
* * Protobuf type {@code hadoop.hdfs.HdfsFileStatusProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.HdfsFileStatusProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getPermissionFieldBuilder(); getLocationsFieldBuilder(); getFileEncryptionInfoFieldBuilder(); getEcPolicyFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; fileType_ = 1; path_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; length_ = 0L; permission_ = null; if (permissionBuilder_ != null) { permissionBuilder_.dispose(); permissionBuilder_ = null; } owner_ = ""; group_ = ""; modificationTime_ = 0L; accessTime_ = 0L; symlink_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; blockReplication_ = 0; blocksize_ = 0L; locations_ = null; if (locationsBuilder_ != null) { locationsBuilder_.dispose(); locationsBuilder_ = null; } fileId_ = 0L; childrenNum_ = -1; fileEncryptionInfo_ = null; if (fileEncryptionInfoBuilder_ != null) { fileEncryptionInfoBuilder_.dispose(); fileEncryptionInfoBuilder_ = null; } storagePolicy_ = 0; ecPolicy_ = null; if (ecPolicyBuilder_ != null) { ecPolicyBuilder_.dispose(); ecPolicyBuilder_ = null; } flags_ = 0; namespace_ = ""; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto(this); if (bitField0_ != 0) { 
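        // Editorial note: buildPartial0 copies only the fields whose presence bits
        // are set in this builder's bitField0_ and ORs those bits into
        // result.bitField0_, so has*() on the built message reflects what was set.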
buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.fileType_ = fileType_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.path_ = path_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.length_ = length_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.permission_ = permissionBuilder_ == null ? permission_ : permissionBuilder_.build(); to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.owner_ = owner_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.group_ = group_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { result.modificationTime_ = modificationTime_; to_bitField0_ |= 0x00000040; } if (((from_bitField0_ & 0x00000080) != 0)) { result.accessTime_ = accessTime_; to_bitField0_ |= 0x00000080; } if (((from_bitField0_ & 0x00000100) != 0)) { result.symlink_ = symlink_; to_bitField0_ |= 0x00000100; } if (((from_bitField0_ & 0x00000200) != 0)) { result.blockReplication_ = blockReplication_; to_bitField0_ |= 0x00000200; } if (((from_bitField0_ & 0x00000400) != 0)) { result.blocksize_ = blocksize_; to_bitField0_ |= 0x00000400; } if (((from_bitField0_ & 0x00000800) != 0)) { result.locations_ = locationsBuilder_ == null ? locations_ : locationsBuilder_.build(); to_bitField0_ |= 0x00000800; } if (((from_bitField0_ & 0x00001000) != 0)) { result.fileId_ = fileId_; to_bitField0_ |= 0x00001000; } if (((from_bitField0_ & 0x00002000) != 0)) { result.childrenNum_ = childrenNum_; to_bitField0_ |= 0x00002000; } if (((from_bitField0_ & 0x00004000) != 0)) { result.fileEncryptionInfo_ = fileEncryptionInfoBuilder_ == null ? fileEncryptionInfo_ : fileEncryptionInfoBuilder_.build(); to_bitField0_ |= 0x00004000; } if (((from_bitField0_ & 0x00008000) != 0)) { result.storagePolicy_ = storagePolicy_; to_bitField0_ |= 0x00008000; } if (((from_bitField0_ & 0x00010000) != 0)) { result.ecPolicy_ = ecPolicyBuilder_ == null ? 
ecPolicy_ : ecPolicyBuilder_.build(); to_bitField0_ |= 0x00010000; } if (((from_bitField0_ & 0x00020000) != 0)) { result.flags_ = flags_; to_bitField0_ |= 0x00020000; } if (((from_bitField0_ & 0x00040000) != 0)) { result.namespace_ = namespace_; to_bitField0_ |= 0x00040000; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) return this; if (other.hasFileType()) { setFileType(other.getFileType()); } if (other.hasPath()) { setPath(other.getPath()); } if (other.hasLength()) { setLength(other.getLength()); } if (other.hasPermission()) { mergePermission(other.getPermission()); } if (other.hasOwner()) { owner_ = other.owner_; bitField0_ |= 0x00000010; onChanged(); } if (other.hasGroup()) { group_ = other.group_; bitField0_ |= 0x00000020; onChanged(); } if (other.hasModificationTime()) { setModificationTime(other.getModificationTime()); } if (other.hasAccessTime()) { setAccessTime(other.getAccessTime()); } if (other.hasSymlink()) { setSymlink(other.getSymlink()); } if (other.hasBlockReplication()) { setBlockReplication(other.getBlockReplication()); } if (other.hasBlocksize()) { setBlocksize(other.getBlocksize()); } if (other.hasLocations()) { mergeLocations(other.getLocations()); } if (other.hasFileId()) { setFileId(other.getFileId()); } if (other.hasChildrenNum()) { setChildrenNum(other.getChildrenNum()); } if (other.hasFileEncryptionInfo()) { mergeFileEncryptionInfo(other.getFileEncryptionInfo()); } if (other.hasStoragePolicy()) { setStoragePolicy(other.getStoragePolicy()); } if (other.hasEcPolicy()) { mergeEcPolicy(other.getEcPolicy()); } if (other.hasFlags()) { setFlags(other.getFlags()); } if (other.hasNamespace()) { namespace_ = other.namespace_; bitField0_ |= 0x00040000; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasFileType()) { return false; } if (!hasPath()) { return false; } if (!hasLength()) { return false; } if (!hasPermission()) { return false; } if (!hasOwner()) { return false; } if 
(!hasGroup()) { return false; } if (!hasModificationTime()) { return false; } if (!hasAccessTime()) { return false; } if (!getPermission().isInitialized()) { return false; } if (hasLocations()) { if (!getLocations().isInitialized()) { return false; } } if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo().isInitialized()) { return false; } } if (hasEcPolicy()) { if (!getEcPolicy().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(1, tmpRaw); } else { fileType_ = tmpRaw; bitField0_ |= 0x00000001; } break; } // case 8 case 18: { path_ = input.readBytes(); bitField0_ |= 0x00000002; break; } // case 18 case 24: { length_ = input.readUInt64(); bitField0_ |= 0x00000004; break; } // case 24 case 34: { input.readMessage( getPermissionFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000008; break; } // case 34 case 42: { owner_ = input.readBytes(); bitField0_ |= 0x00000010; break; } // case 42 case 50: { group_ = input.readBytes(); bitField0_ |= 0x00000020; break; } // case 50 case 56: { modificationTime_ = input.readUInt64(); bitField0_ |= 0x00000040; break; } // case 56 case 64: { accessTime_ = input.readUInt64(); bitField0_ |= 0x00000080; break; } // case 64 case 74: { symlink_ = input.readBytes(); bitField0_ |= 0x00000100; break; } // case 74 case 80: { blockReplication_ = input.readUInt32(); bitField0_ |= 0x00000200; break; } // case 80 case 88: { blocksize_ = input.readUInt64(); bitField0_ |= 0x00000400; break; } // case 88 case 98: { input.readMessage( getLocationsFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000800; break; } // case 98 case 104: { fileId_ = input.readUInt64(); bitField0_ |= 0x00001000; break; } // case 104 case 112: { childrenNum_ = input.readInt32(); bitField0_ |= 0x00002000; break; } // case 112 case 122: { input.readMessage( getFileEncryptionInfoFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00004000; break; } // case 122 case 128: { storagePolicy_ = input.readUInt32(); bitField0_ |= 0x00008000; break; } // case 128 case 138: { input.readMessage( getEcPolicyFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00010000; break; } // case 138 case 144: { flags_ = input.readUInt32(); bitField0_ |= 0x00020000; break; } // case 144 case 154: { namespace_ = input.readBytes(); bitField0_ |= 0x00040000; break; } // case 154 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private int fileType_ = 1; /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; * @return Whether the fileType field is set. 
*/ @java.lang.Override public boolean hasFileType() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; * @return The fileType. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.forNumber(fileType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR : result; } /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; * @param value The fileType to set. * @return This builder for chaining. */ public Builder setFileType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; fileType_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; * @return This builder for chaining. */ public Builder clearFileType() { bitField0_ = (bitField0_ & ~0x00000001); fileType_ = 1; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString path_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** *
        * local name of the inode, encoded in Java UTF-8
       * 
* * required bytes path = 2; * @return Whether the path field is set. */ @java.lang.Override public boolean hasPath() { return ((bitField0_ & 0x00000002) != 0); } /** *
        * local name of the inode, encoded in Java UTF-8
       * 
* * required bytes path = 2; * @return The path. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getPath() { return path_; } /** *
        * local name of the inode, encoded in Java UTF-8
       * 
* * required bytes path = 2; * @param value The path to set. * @return This builder for chaining. */ public Builder setPath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } path_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** *
        * local name of the inode, encoded in Java UTF-8
       * 
* * required bytes path = 2; * @return This builder for chaining. */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000002); path_ = getDefaultInstance().getPath(); onChanged(); return this; } private long length_ ; /** * required uint64 length = 3; * @return Whether the length field is set. */ @java.lang.Override public boolean hasLength() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 length = 3; * @return The length. */ @java.lang.Override public long getLength() { return length_; } /** * required uint64 length = 3; * @param value The length to set. * @return This builder for chaining. */ public Builder setLength(long value) { length_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required uint64 length = 3; * @return This builder for chaining. */ public Builder clearLength() { bitField0_ = (bitField0_ & ~0x00000004); length_ = 0L; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> permissionBuilder_; /** * required .hadoop.hdfs.FsPermissionProto permission = 4; * @return Whether the permission field is set. */ public boolean hasPermission() { return ((bitField0_ & 0x00000008) != 0); } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; * @return The permission. */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() { if (permissionBuilder_ == null) { return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } else { return permissionBuilder_.getMessage(); } } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (permissionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } permission_ = value; } else { permissionBuilder_.setMessage(value); } bitField0_ |= 0x00000008; onChanged(); return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public Builder setPermission( org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) { if (permissionBuilder_ == null) { permission_ = builderForValue.build(); } else { permissionBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; onChanged(); return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (permissionBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0) && permission_ != null && permission_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) { getPermissionBuilder().mergeFrom(value); } else { permission_ = value; } } else { permissionBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; onChanged(); return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public Builder clearPermission() { bitField0_ = (bitField0_ & ~0x00000008); permission_ = null; if (permissionBuilder_ != null) { permissionBuilder_.dispose(); permissionBuilder_ = null; } onChanged(); return this; } /** * required .hadoop.hdfs.FsPermissionProto 
permission = 4; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getPermissionBuilder() { bitField0_ |= 0x00000008; onChanged(); return getPermissionFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { if (permissionBuilder_ != null) { return permissionBuilder_.getMessageOrBuilder(); } else { return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> getPermissionFieldBuilder() { if (permissionBuilder_ == null) { permissionBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>( getPermission(), getParentForChildren(), isClean()); permission_ = null; } return permissionBuilder_; } private java.lang.Object owner_ = ""; /** * required string owner = 5; * @return Whether the owner field is set. */ public boolean hasOwner() { return ((bitField0_ & 0x00000010) != 0); } /** * required string owner = 5; * @return The owner. */ public java.lang.String getOwner() { java.lang.Object ref = owner_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { owner_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string owner = 5; * @return The bytes for owner. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string owner = 5; * @param value The owner to set. * @return This builder for chaining. */ public Builder setOwner( java.lang.String value) { if (value == null) { throw new NullPointerException(); } owner_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * required string owner = 5; * @return This builder for chaining. */ public Builder clearOwner() { owner_ = getDefaultInstance().getOwner(); bitField0_ = (bitField0_ & ~0x00000010); onChanged(); return this; } /** * required string owner = 5; * @param value The bytes for owner to set. * @return This builder for chaining. */ public Builder setOwnerBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } owner_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } private java.lang.Object group_ = ""; /** * required string group = 6; * @return Whether the group field is set. */ public boolean hasGroup() { return ((bitField0_ & 0x00000020) != 0); } /** * required string group = 6; * @return The group. 
*/ public java.lang.String getGroup() { java.lang.Object ref = group_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { group_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string group = 6; * @return The bytes for group. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getGroupBytes() { java.lang.Object ref = group_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); group_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string group = 6; * @param value The group to set. * @return This builder for chaining. */ public Builder setGroup( java.lang.String value) { if (value == null) { throw new NullPointerException(); } group_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } /** * required string group = 6; * @return This builder for chaining. */ public Builder clearGroup() { group_ = getDefaultInstance().getGroup(); bitField0_ = (bitField0_ & ~0x00000020); onChanged(); return this; } /** * required string group = 6; * @param value The bytes for group to set. * @return This builder for chaining. */ public Builder setGroupBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } group_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } private long modificationTime_ ; /** * required uint64 modification_time = 7; * @return Whether the modificationTime field is set. */ @java.lang.Override public boolean hasModificationTime() { return ((bitField0_ & 0x00000040) != 0); } /** * required uint64 modification_time = 7; * @return The modificationTime. */ @java.lang.Override public long getModificationTime() { return modificationTime_; } /** * required uint64 modification_time = 7; * @param value The modificationTime to set. * @return This builder for chaining. */ public Builder setModificationTime(long value) { modificationTime_ = value; bitField0_ |= 0x00000040; onChanged(); return this; } /** * required uint64 modification_time = 7; * @return This builder for chaining. */ public Builder clearModificationTime() { bitField0_ = (bitField0_ & ~0x00000040); modificationTime_ = 0L; onChanged(); return this; } private long accessTime_ ; /** * required uint64 access_time = 8; * @return Whether the accessTime field is set. */ @java.lang.Override public boolean hasAccessTime() { return ((bitField0_ & 0x00000080) != 0); } /** * required uint64 access_time = 8; * @return The accessTime. */ @java.lang.Override public long getAccessTime() { return accessTime_; } /** * required uint64 access_time = 8; * @param value The accessTime to set. * @return This builder for chaining. */ public Builder setAccessTime(long value) { accessTime_ = value; bitField0_ |= 0x00000080; onChanged(); return this; } /** * required uint64 access_time = 8; * @return This builder for chaining. */ public Builder clearAccessTime() { bitField0_ = (bitField0_ & ~0x00000080); accessTime_ = 0L; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString symlink_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** *
       * Optional fields for symlink
       * 
* * optional bytes symlink = 9; * @return Whether the symlink field is set. */ @java.lang.Override public boolean hasSymlink() { return ((bitField0_ & 0x00000100) != 0); } /** *
       * Optional fields for symlink
       * 
* * optional bytes symlink = 9; * @return The symlink. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getSymlink() { return symlink_; } /** *
       * Optional fields for symlink
       * 
* * optional bytes symlink = 9; * @param value The symlink to set. * @return This builder for chaining. */ public Builder setSymlink(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } symlink_ = value; bitField0_ |= 0x00000100; onChanged(); return this; } /** *
       * Optional fields for symlink
       * 
* * optional bytes symlink = 9; * @return This builder for chaining. */ public Builder clearSymlink() { bitField0_ = (bitField0_ & ~0x00000100); symlink_ = getDefaultInstance().getSymlink(); onChanged(); return this; } private int blockReplication_ ; /** *
       * Optional fields for file
       * 
* * optional uint32 block_replication = 10 [default = 0]; * @return Whether the blockReplication field is set. */ @java.lang.Override public boolean hasBlockReplication() { return ((bitField0_ & 0x00000200) != 0); } /** *
       * Optional fields for file
       * 
* * optional uint32 block_replication = 10 [default = 0]; * @return The blockReplication. */ @java.lang.Override public int getBlockReplication() { return blockReplication_; } /** *
       * Optional fields for file
       * 
* * optional uint32 block_replication = 10 [default = 0]; * @param value The blockReplication to set. * @return This builder for chaining. */ public Builder setBlockReplication(int value) { blockReplication_ = value; bitField0_ |= 0x00000200; onChanged(); return this; } /** *
       * Optional fields for file
       * 
* * optional uint32 block_replication = 10 [default = 0]; * @return This builder for chaining. */ public Builder clearBlockReplication() { bitField0_ = (bitField0_ & ~0x00000200); blockReplication_ = 0; onChanged(); return this; } private long blocksize_ ; /** * optional uint64 blocksize = 11 [default = 0]; * @return Whether the blocksize field is set. */ @java.lang.Override public boolean hasBlocksize() { return ((bitField0_ & 0x00000400) != 0); } /** * optional uint64 blocksize = 11 [default = 0]; * @return The blocksize. */ @java.lang.Override public long getBlocksize() { return blocksize_; } /** * optional uint64 blocksize = 11 [default = 0]; * @param value The blocksize to set. * @return This builder for chaining. */ public Builder setBlocksize(long value) { blocksize_ = value; bitField0_ |= 0x00000400; onChanged(); return this; } /** * optional uint64 blocksize = 11 [default = 0]; * @return This builder for chaining. */ public Builder clearBlocksize() { bitField0_ = (bitField0_ & ~0x00000400); blocksize_ = 0L; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> locationsBuilder_; /** *
        * supplied only if requested by the client
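        * Because locations is optional, a reader should check hasLocations()
        * before using it. A minimal access sketch (status is an illustrative,
        * already-populated HdfsFileStatusProto):
        *   if (status.hasLocations()) {
        *     // locations were requested by the client and included in the response
        *     LocatedBlocksProto blocks = status.getLocations();
        *   }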
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * @return Whether the locations field is set. */ public boolean hasLocations() { return ((bitField0_ & 0x00000800) != 0); } /** *
        * supplied only if requested by the client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * @return The locations. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() { if (locationsBuilder_ == null) { return locations_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance() : locations_; } else { return locationsBuilder_.getMessage(); } } /** *
        * supplied only if requested by the client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public Builder setLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) { if (locationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } locations_ = value; } else { locationsBuilder_.setMessage(value); } bitField0_ |= 0x00000800; onChanged(); return this; } /** *
        * supplied only if requested by the client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public Builder setLocations( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder builderForValue) { if (locationsBuilder_ == null) { locations_ = builderForValue.build(); } else { locationsBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000800; onChanged(); return this; } /** *
        * supplied only if requested by the client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public Builder mergeLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) { if (locationsBuilder_ == null) { if (((bitField0_ & 0x00000800) != 0) && locations_ != null && locations_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) { getLocationsBuilder().mergeFrom(value); } else { locations_ = value; } } else { locationsBuilder_.mergeFrom(value); } bitField0_ |= 0x00000800; onChanged(); return this; } /** *
        * supplied only if requested by the client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public Builder clearLocations() { bitField0_ = (bitField0_ & ~0x00000800); locations_ = null; if (locationsBuilder_ != null) { locationsBuilder_.dispose(); locationsBuilder_ = null; } onChanged(); return this; } /** *
        * supplied only if requested by the client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder getLocationsBuilder() { bitField0_ |= 0x00000800; onChanged(); return getLocationsFieldBuilder().getBuilder(); } /** *
        * supplied only if requested by the client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() { if (locationsBuilder_ != null) { return locationsBuilder_.getMessageOrBuilder(); } else { return locations_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance() : locations_; } } /** *
        * supplied only if requested by the client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> getLocationsFieldBuilder() { if (locationsBuilder_ == null) { locationsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder>( getLocations(), getParentForChildren(), isClean()); locations_ = null; } return locationsBuilder_; } private long fileId_ ; /** *
       * Optional field for fileId
       * 
* * optional uint64 fileId = 13 [default = 0]; * @return Whether the fileId field is set. */ @java.lang.Override public boolean hasFileId() { return ((bitField0_ & 0x00001000) != 0); } /** *
       * Optional field for fileId
       * 
* * optional uint64 fileId = 13 [default = 0]; * @return The fileId. */ @java.lang.Override public long getFileId() { return fileId_; } /** *
       * Optional field for fileId
       * 
* * optional uint64 fileId = 13 [default = 0]; * @param value The fileId to set. * @return This builder for chaining. */ public Builder setFileId(long value) { fileId_ = value; bitField0_ |= 0x00001000; onChanged(); return this; } /** *
       * Optional field for fileId
       * 
* * optional uint64 fileId = 13 [default = 0]; * @return This builder for chaining. */ public Builder clearFileId() { bitField0_ = (bitField0_ & ~0x00001000); fileId_ = 0L; onChanged(); return this; } private int childrenNum_ = -1; /** * optional int32 childrenNum = 14 [default = -1]; * @return Whether the childrenNum field is set. */ @java.lang.Override public boolean hasChildrenNum() { return ((bitField0_ & 0x00002000) != 0); } /** * optional int32 childrenNum = 14 [default = -1]; * @return The childrenNum. */ @java.lang.Override public int getChildrenNum() { return childrenNum_; } /** * optional int32 childrenNum = 14 [default = -1]; * @param value The childrenNum to set. * @return This builder for chaining. */ public Builder setChildrenNum(int value) { childrenNum_ = value; bitField0_ |= 0x00002000; onChanged(); return this; } /** * optional int32 childrenNum = 14 [default = -1]; * @return This builder for chaining. */ public Builder clearChildrenNum() { bitField0_ = (bitField0_ & ~0x00002000); childrenNum_ = -1; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> fileEncryptionInfoBuilder_; /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * @return Whether the fileEncryptionInfo field is set. */ public boolean hasFileEncryptionInfo() { return ((bitField0_ & 0x00004000) != 0); } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * @return The fileEncryptionInfo. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() { if (fileEncryptionInfoBuilder_ == null) { return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } else { return fileEncryptionInfoBuilder_.getMessage(); } } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public Builder setFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) { if (fileEncryptionInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } fileEncryptionInfo_ = value; } else { fileEncryptionInfoBuilder_.setMessage(value); } bitField0_ |= 0x00004000; onChanged(); return this; } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public Builder setFileEncryptionInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder builderForValue) { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = builderForValue.build(); } else { fileEncryptionInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00004000; onChanged(); return this; } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public Builder mergeFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) { if (fileEncryptionInfoBuilder_ == null) { if (((bitField0_ & 0x00004000) != 0) && fileEncryptionInfo_ != null && fileEncryptionInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) { getFileEncryptionInfoBuilder().mergeFrom(value); } else { fileEncryptionInfo_ = value; } } else { fileEncryptionInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00004000; onChanged(); return this; } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public Builder clearFileEncryptionInfo() { bitField0_ = (bitField0_ & ~0x00004000); fileEncryptionInfo_ = null; if (fileEncryptionInfoBuilder_ != null) { fileEncryptionInfoBuilder_.dispose(); fileEncryptionInfoBuilder_ = null; } onChanged(); return this; } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder getFileEncryptionInfoBuilder() { bitField0_ |= 0x00004000; onChanged(); return getFileEncryptionInfoFieldBuilder().getBuilder(); } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() { if (fileEncryptionInfoBuilder_ != null) { return fileEncryptionInfoBuilder_.getMessageOrBuilder(); } else { return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> getFileEncryptionInfoFieldBuilder() { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>( getFileEncryptionInfo(), getParentForChildren(), isClean()); fileEncryptionInfo_ = null; } return fileEncryptionInfoBuilder_; } private int storagePolicy_ ; /** *
       * block storage policy id
       * 
* * optional uint32 storagePolicy = 16 [default = 0]; * @return Whether the storagePolicy field is set. */ @java.lang.Override public boolean hasStoragePolicy() { return ((bitField0_ & 0x00008000) != 0); } /** *
       * block storage policy id
       * 
* * optional uint32 storagePolicy = 16 [default = 0]; * @return The storagePolicy. */ @java.lang.Override public int getStoragePolicy() { return storagePolicy_; } /** *
       * block storage policy id
       * 
* * optional uint32 storagePolicy = 16 [default = 0]; * @param value The storagePolicy to set. * @return This builder for chaining. */ public Builder setStoragePolicy(int value) { storagePolicy_ = value; bitField0_ |= 0x00008000; onChanged(); return this; } /** *
       * block storage policy id
       * 
* * optional uint32 storagePolicy = 16 [default = 0]; * @return This builder for chaining. */ public Builder clearStoragePolicy() { bitField0_ = (bitField0_ & ~0x00008000); storagePolicy_ = 0; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> ecPolicyBuilder_; /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; * @return Whether the ecPolicy field is set. */ public boolean hasEcPolicy() { return ((bitField0_ & 0x00010000) != 0); } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; * @return The ecPolicy. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() { if (ecPolicyBuilder_ == null) { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } else { return ecPolicyBuilder_.getMessage(); } } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public Builder setEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (ecPolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ecPolicy_ = value; } else { ecPolicyBuilder_.setMessage(value); } bitField0_ |= 0x00010000; onChanged(); return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public Builder setEcPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) { if (ecPolicyBuilder_ == null) { ecPolicy_ = builderForValue.build(); } else { ecPolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00010000; onChanged(); return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public Builder mergeEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (ecPolicyBuilder_ == null) { if (((bitField0_ & 0x00010000) != 0) && ecPolicy_ != null && ecPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) { getEcPolicyBuilder().mergeFrom(value); } else { ecPolicy_ = value; } } else { ecPolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00010000; onChanged(); return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public Builder clearEcPolicy() { bitField0_ = (bitField0_ & ~0x00010000); ecPolicy_ = null; if (ecPolicyBuilder_ != null) { ecPolicyBuilder_.dispose(); ecPolicyBuilder_ = null; } onChanged(); return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getEcPolicyBuilder() { bitField0_ |= 0x00010000; onChanged(); return getEcPolicyFieldBuilder().getBuilder(); } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() { if (ecPolicyBuilder_ != null) { return ecPolicyBuilder_.getMessageOrBuilder(); } else { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> getEcPolicyFieldBuilder() { if (ecPolicyBuilder_ == null) { ecPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>( getEcPolicy(), getParentForChildren(), isClean()); ecPolicy_ = null; } return ecPolicyBuilder_; } private int flags_ ; /** *
       * Set of flags
       * 
* * optional uint32 flags = 18 [default = 0]; * @return Whether the flags field is set. */ @java.lang.Override public boolean hasFlags() { return ((bitField0_ & 0x00020000) != 0); } /** *
       * Set of flags
       * 
* * optional uint32 flags = 18 [default = 0]; * @return The flags. */ @java.lang.Override public int getFlags() { return flags_; } /** *
       * Set of flags
       * 
* * optional uint32 flags = 18 [default = 0]; * @param value The flags to set. * @return This builder for chaining. */ public Builder setFlags(int value) { flags_ = value; bitField0_ |= 0x00020000; onChanged(); return this; } /** *
       * Set of flags
       * 
* * optional uint32 flags = 18 [default = 0]; * @return This builder for chaining. */ public Builder clearFlags() { bitField0_ = (bitField0_ & ~0x00020000); flags_ = 0; onChanged(); return this; } private java.lang.Object namespace_ = ""; /** * optional string namespace = 19; * @return Whether the namespace field is set. */ public boolean hasNamespace() { return ((bitField0_ & 0x00040000) != 0); } /** * optional string namespace = 19; * @return The namespace. */ public java.lang.String getNamespace() { java.lang.Object ref = namespace_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { namespace_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string namespace = 19; * @return The bytes for namespace. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getNamespaceBytes() { java.lang.Object ref = namespace_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); namespace_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string namespace = 19; * @param value The namespace to set. * @return This builder for chaining. */ public Builder setNamespace( java.lang.String value) { if (value == null) { throw new NullPointerException(); } namespace_ = value; bitField0_ |= 0x00040000; onChanged(); return this; } /** * optional string namespace = 19; * @return This builder for chaining. */ public Builder clearNamespace() { namespace_ = getDefaultInstance().getNamespace(); bitField0_ = (bitField0_ & ~0x00040000); onChanged(); return this; } /** * optional string namespace = 19; * @param value The bytes for namespace to set. * @return This builder for chaining. 
*/ public Builder setNamespaceBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } namespace_ = value; bitField0_ |= 0x00040000; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HdfsFileStatusProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.HdfsFileStatusProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public HdfsFileStatusProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface BlockChecksumOptionsProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockChecksumOptionsProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; * @return Whether the blockChecksumType field is set. */ boolean hasBlockChecksumType(); /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; * @return The blockChecksumType. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto getBlockChecksumType(); /** *
     * Only used if blockChecksumType specifies a striped format, such as
     * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
      * to be the concatenation of N CRCs, where
      * N == ((requestedLength - 1) / stripeLength) + 1
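      * As a worked example with illustrative values: for requestedLength = 1000
      * and stripeLength = 512, N == ((1000 - 1) / 512) + 1 == 2, so the returned
      * blockChecksum is the concatenation of two CRCs.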
     * 
* * optional uint64 stripeLength = 2; * @return Whether the stripeLength field is set. */ boolean hasStripeLength(); /** *
     * Only used if blockChecksumType specifies a striped format, such as
     * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
      * to be the concatenation of N CRCs, where
      * N == ((requestedLength - 1) / stripeLength) + 1
     * 
* * optional uint64 stripeLength = 2; * @return The stripeLength. */ long getStripeLength(); } /** *
   **
   * Algorithms/types denoting how block-level checksums are computed using
   * lower-level chunk checksums/CRCs.
   * These options should be kept in sync with
   * org.apache.hadoop.hdfs.protocol.BlockChecksumOptions.
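    * A rough construction sketch (stripeLengthBytes is an illustrative local
    * variable, not part of the generated API) for a client requesting a
    * striped composite-CRC checksum:
    *   BlockChecksumOptionsProto opts = BlockChecksumOptionsProto.newBuilder()
    *       .setBlockChecksumType(BlockChecksumTypeProto.COMPOSITE_CRC)
    *       .setStripeLength(stripeLengthBytes)
    *       .build();
    * Leaving both fields unset keeps the declared defaults (blockChecksumType
    * falls back to MD5CRC).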
   * 
* * Protobuf type {@code hadoop.hdfs.BlockChecksumOptionsProto} */ public static final class BlockChecksumOptionsProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockChecksumOptionsProto) BlockChecksumOptionsProtoOrBuilder { private static final long serialVersionUID = 0L; // Use BlockChecksumOptionsProto.newBuilder() to construct. private BlockChecksumOptionsProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private BlockChecksumOptionsProto() { blockChecksumType_ = 1; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new BlockChecksumOptionsProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder.class); } private int bitField0_; public static final int BLOCKCHECKSUMTYPE_FIELD_NUMBER = 1; private int blockChecksumType_ = 1; /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; * @return Whether the blockChecksumType field is set. */ @java.lang.Override public boolean hasBlockChecksumType() { return ((bitField0_ & 0x00000001) != 0); } /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; * @return The blockChecksumType. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto getBlockChecksumType() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.forNumber(blockChecksumType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.MD5CRC : result; } public static final int STRIPELENGTH_FIELD_NUMBER = 2; private long stripeLength_ = 0L; /** *
     * Only used if blockChecksumType specifies a striped format, such as
     * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
      * to be the concatenation of N CRCs, where
      * N == ((requestedLength - 1) / stripeLength) + 1
     * 
* * optional uint64 stripeLength = 2; * @return Whether the stripeLength field is set. */ @java.lang.Override public boolean hasStripeLength() { return ((bitField0_ & 0x00000002) != 0); } /** *
     * Only used if blockChecksumType specifies a striped format, such as
     * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
      * to be the concatenation of N CRCs, where
      * N == ((requestedLength - 1) / stripeLength) + 1
     * 
* * optional uint64 stripeLength = 2; * @return The stripeLength. */ @java.lang.Override public long getStripeLength() { return stripeLength_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, blockChecksumType_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, stripeLength_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, blockChecksumType_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, stripeLength_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto) obj; if (hasBlockChecksumType() != other.hasBlockChecksumType()) return false; if (hasBlockChecksumType()) { if (blockChecksumType_ != other.blockChecksumType_) return false; } if (hasStripeLength() != other.hasStripeLength()) return false; if (hasStripeLength()) { if (getStripeLength() != other.getStripeLength()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasBlockChecksumType()) { hash = (37 * hash) + BLOCKCHECKSUMTYPE_FIELD_NUMBER; hash = (53 * hash) + blockChecksumType_; } if (hasStripeLength()) { hash = (37 * hash) + STRIPELENGTH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getStripeLength()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Algorithms/types denoting how block-level checksums are computed using
     * lower-level chunk checksums/CRCs.
     * These options should be kept in sync with
     * org.apache.hadoop.hdfs.protocol.BlockChecksumOptions.
     * 
* * Protobuf type {@code hadoop.hdfs.BlockChecksumOptionsProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockChecksumOptionsProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; blockChecksumType_ = 1; stripeLength_ = 0L; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.blockChecksumType_ = blockChecksumType_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.stripeLength_ = stripeLength_; to_bitField0_ |= 0x00000002; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor 
oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) return this; if (other.hasBlockChecksumType()) { setBlockChecksumType(other.getBlockChecksumType()); } if (other.hasStripeLength()) { setStripeLength(other.getStripeLength()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(1, tmpRaw); } else { blockChecksumType_ = tmpRaw; bitField0_ |= 0x00000001; } break; } // case 8 case 16: { stripeLength_ = input.readUInt64(); bitField0_ |= 0x00000002; break; } // case 16 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private int blockChecksumType_ = 1; /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; * @return Whether the blockChecksumType field is set. */ @java.lang.Override public boolean hasBlockChecksumType() { return ((bitField0_ & 0x00000001) != 0); } /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; * @return The blockChecksumType. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto getBlockChecksumType() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.forNumber(blockChecksumType_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.MD5CRC : result; } /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; * @param value The blockChecksumType to set. * @return This builder for chaining. */ public Builder setBlockChecksumType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; blockChecksumType_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; * @return This builder for chaining. */ public Builder clearBlockChecksumType() { bitField0_ = (bitField0_ & ~0x00000001); blockChecksumType_ = 1; onChanged(); return this; } private long stripeLength_ ; /** *
       * Only used if blockChecksumType specifies a striped format, such as
       * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
        * to be the concatenation of N CRCs, where
        * N == ((requestedLength - 1) / stripeLength) + 1
       * 
* * optional uint64 stripeLength = 2; * @return Whether the stripeLength field is set. */ @java.lang.Override public boolean hasStripeLength() { return ((bitField0_ & 0x00000002) != 0); } /** *
       * Only used if blockChecksumType specifies a striped format, such as
       * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
        * to be the concatenation of N CRCs, where
        * N == ((requestedLength - 1) / stripeLength) + 1
       * 
* * optional uint64 stripeLength = 2; * @return The stripeLength. */ @java.lang.Override public long getStripeLength() { return stripeLength_; } /** *
       * Only used if blockChecksumType specifies a striped format, such as
       * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
        * to be the concatenation of N CRCs, where
        * N == ((requestedLength - 1) / stripeLength) + 1
       * 
* * optional uint64 stripeLength = 2; * @param value The stripeLength to set. * @return This builder for chaining. */ public Builder setStripeLength(long value) { stripeLength_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** *
       * Only used if blockChecksumType specifies a striped format, such as
       * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
        * to be the concatenation of N CRCs, where
        * N == ((requestedLength - 1) / stripeLength) + 1
       * 
* * optional uint64 stripeLength = 2; * @return This builder for chaining. */ public Builder clearStripeLength() { bitField0_ = (bitField0_ & ~0x00000002); stripeLength_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockChecksumOptionsProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockChecksumOptionsProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public BlockChecksumOptionsProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface FsServerDefaultsProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.FsServerDefaultsProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint64 blockSize = 1; * @return Whether the blockSize field is set. */ boolean hasBlockSize(); /** * required uint64 blockSize = 1; * @return The blockSize. */ long getBlockSize(); /** * required uint32 bytesPerChecksum = 2; * @return Whether the bytesPerChecksum field is set. */ boolean hasBytesPerChecksum(); /** * required uint32 bytesPerChecksum = 2; * @return The bytesPerChecksum. */ int getBytesPerChecksum(); /** * required uint32 writePacketSize = 3; * @return Whether the writePacketSize field is set. */ boolean hasWritePacketSize(); /** * required uint32 writePacketSize = 3; * @return The writePacketSize. */ int getWritePacketSize(); /** *
     * Actually a short - only 16 bits used
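      * (A caller that needs it as a Java short can therefore narrow it safely,
      * e.g. short repl = (short) defaults.getReplication(); where defaults is
      * an illustrative FsServerDefaultsProto instance.)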
     * 
* * required uint32 replication = 4; * @return Whether the replication field is set. */ boolean hasReplication(); /** *
     * Actually a short - only 16 bits used
     * 
* * required uint32 replication = 4; * @return The replication. */ int getReplication(); /** * required uint32 fileBufferSize = 5; * @return Whether the fileBufferSize field is set. */ boolean hasFileBufferSize(); /** * required uint32 fileBufferSize = 5; * @return The fileBufferSize. */ int getFileBufferSize(); /** * optional bool encryptDataTransfer = 6 [default = false]; * @return Whether the encryptDataTransfer field is set. */ boolean hasEncryptDataTransfer(); /** * optional bool encryptDataTransfer = 6 [default = false]; * @return The encryptDataTransfer. */ boolean getEncryptDataTransfer(); /** * optional uint64 trashInterval = 7 [default = 0]; * @return Whether the trashInterval field is set. */ boolean hasTrashInterval(); /** * optional uint64 trashInterval = 7 [default = 0]; * @return The trashInterval. */ long getTrashInterval(); /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; * @return Whether the checksumType field is set. */ boolean hasChecksumType(); /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; * @return The checksumType. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType(); /** * optional string keyProviderUri = 9; * @return Whether the keyProviderUri field is set. */ boolean hasKeyProviderUri(); /** * optional string keyProviderUri = 9; * @return The keyProviderUri. */ java.lang.String getKeyProviderUri(); /** * optional string keyProviderUri = 9; * @return The bytes for keyProviderUri. */ org.apache.hadoop.thirdparty.protobuf.ByteString getKeyProviderUriBytes(); /** * optional uint32 policyId = 10 [default = 0]; * @return Whether the policyId field is set. */ boolean hasPolicyId(); /** * optional uint32 policyId = 10 [default = 0]; * @return The policyId. */ int getPolicyId(); /** * optional bool snapshotTrashRootEnabled = 11 [default = false]; * @return Whether the snapshotTrashRootEnabled field is set. */ boolean hasSnapshotTrashRootEnabled(); /** * optional bool snapshotTrashRootEnabled = 11 [default = false]; * @return The snapshotTrashRootEnabled. */ boolean getSnapshotTrashRootEnabled(); } /** *
   **
   * HDFS Server Defaults
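    * A minimal construction sketch (values are placeholders, not recommended
    * settings). The first five fields are required, so build() reports an
    * uninitialized message if any of them is left unset:
    *   FsServerDefaultsProto defaults = FsServerDefaultsProto.newBuilder()
    *       .setBlockSize(128L * 1024 * 1024)
    *       .setBytesPerChecksum(512)
    *       .setWritePacketSize(64 * 1024)
    *       .setReplication(3)
    *       .setFileBufferSize(4096)
    *       .build();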
   * 
* * Protobuf type {@code hadoop.hdfs.FsServerDefaultsProto} */ public static final class FsServerDefaultsProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.FsServerDefaultsProto) FsServerDefaultsProtoOrBuilder { private static final long serialVersionUID = 0L; // Use FsServerDefaultsProto.newBuilder() to construct. private FsServerDefaultsProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private FsServerDefaultsProto() { checksumType_ = 1; keyProviderUri_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new FsServerDefaultsProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class); } private int bitField0_; public static final int BLOCKSIZE_FIELD_NUMBER = 1; private long blockSize_ = 0L; /** * required uint64 blockSize = 1; * @return Whether the blockSize field is set. */ @java.lang.Override public boolean hasBlockSize() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 blockSize = 1; * @return The blockSize. */ @java.lang.Override public long getBlockSize() { return blockSize_; } public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2; private int bytesPerChecksum_ = 0; /** * required uint32 bytesPerChecksum = 2; * @return Whether the bytesPerChecksum field is set. */ @java.lang.Override public boolean hasBytesPerChecksum() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 bytesPerChecksum = 2; * @return The bytesPerChecksum. */ @java.lang.Override public int getBytesPerChecksum() { return bytesPerChecksum_; } public static final int WRITEPACKETSIZE_FIELD_NUMBER = 3; private int writePacketSize_ = 0; /** * required uint32 writePacketSize = 3; * @return Whether the writePacketSize field is set. */ @java.lang.Override public boolean hasWritePacketSize() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint32 writePacketSize = 3; * @return The writePacketSize. */ @java.lang.Override public int getWritePacketSize() { return writePacketSize_; } public static final int REPLICATION_FIELD_NUMBER = 4; private int replication_ = 0; /** *
     * Actually a short - only 16 bits used
     * 
* * required uint32 replication = 4; * @return Whether the replication field is set. */ @java.lang.Override public boolean hasReplication() { return ((bitField0_ & 0x00000008) != 0); } /** *
     * Actually a short - only 16 bits used
     * 
* * required uint32 replication = 4; * @return The replication. */ @java.lang.Override public int getReplication() { return replication_; } public static final int FILEBUFFERSIZE_FIELD_NUMBER = 5; private int fileBufferSize_ = 0; /** * required uint32 fileBufferSize = 5; * @return Whether the fileBufferSize field is set. */ @java.lang.Override public boolean hasFileBufferSize() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint32 fileBufferSize = 5; * @return The fileBufferSize. */ @java.lang.Override public int getFileBufferSize() { return fileBufferSize_; } public static final int ENCRYPTDATATRANSFER_FIELD_NUMBER = 6; private boolean encryptDataTransfer_ = false; /** * optional bool encryptDataTransfer = 6 [default = false]; * @return Whether the encryptDataTransfer field is set. */ @java.lang.Override public boolean hasEncryptDataTransfer() { return ((bitField0_ & 0x00000020) != 0); } /** * optional bool encryptDataTransfer = 6 [default = false]; * @return The encryptDataTransfer. */ @java.lang.Override public boolean getEncryptDataTransfer() { return encryptDataTransfer_; } public static final int TRASHINTERVAL_FIELD_NUMBER = 7; private long trashInterval_ = 0L; /** * optional uint64 trashInterval = 7 [default = 0]; * @return Whether the trashInterval field is set. */ @java.lang.Override public boolean hasTrashInterval() { return ((bitField0_ & 0x00000040) != 0); } /** * optional uint64 trashInterval = 7 [default = 0]; * @return The trashInterval. */ @java.lang.Override public long getTrashInterval() { return trashInterval_; } public static final int CHECKSUMTYPE_FIELD_NUMBER = 8; private int checksumType_ = 1; /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; * @return Whether the checksumType field is set. */ @java.lang.Override public boolean hasChecksumType() { return ((bitField0_ & 0x00000080) != 0); } /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; * @return The checksumType. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(checksumType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32 : result; } public static final int KEYPROVIDERURI_FIELD_NUMBER = 9; @SuppressWarnings("serial") private volatile java.lang.Object keyProviderUri_ = ""; /** * optional string keyProviderUri = 9; * @return Whether the keyProviderUri field is set. */ @java.lang.Override public boolean hasKeyProviderUri() { return ((bitField0_ & 0x00000100) != 0); } /** * optional string keyProviderUri = 9; * @return The keyProviderUri. */ @java.lang.Override public java.lang.String getKeyProviderUri() { java.lang.Object ref = keyProviderUri_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyProviderUri_ = s; } return s; } } /** * optional string keyProviderUri = 9; * @return The bytes for keyProviderUri. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyProviderUriBytes() { java.lang.Object ref = keyProviderUri_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyProviderUri_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int POLICYID_FIELD_NUMBER = 10; private int policyId_ = 0; /** * optional uint32 policyId = 10 [default = 0]; * @return Whether the policyId field is set. */ @java.lang.Override public boolean hasPolicyId() { return ((bitField0_ & 0x00000200) != 0); } /** * optional uint32 policyId = 10 [default = 0]; * @return The policyId. */ @java.lang.Override public int getPolicyId() { return policyId_; } public static final int SNAPSHOTTRASHROOTENABLED_FIELD_NUMBER = 11; private boolean snapshotTrashRootEnabled_ = false; /** * optional bool snapshotTrashRootEnabled = 11 [default = false]; * @return Whether the snapshotTrashRootEnabled field is set. */ @java.lang.Override public boolean hasSnapshotTrashRootEnabled() { return ((bitField0_ & 0x00000400) != 0); } /** * optional bool snapshotTrashRootEnabled = 11 [default = false]; * @return The snapshotTrashRootEnabled. */ @java.lang.Override public boolean getSnapshotTrashRootEnabled() { return snapshotTrashRootEnabled_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasBlockSize()) { memoizedIsInitialized = 0; return false; } if (!hasBytesPerChecksum()) { memoizedIsInitialized = 0; return false; } if (!hasWritePacketSize()) { memoizedIsInitialized = 0; return false; } if (!hasReplication()) { memoizedIsInitialized = 0; return false; } if (!hasFileBufferSize()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, blockSize_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, bytesPerChecksum_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt32(3, writePacketSize_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt32(4, replication_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt32(5, fileBufferSize_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeBool(6, encryptDataTransfer_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeUInt64(7, trashInterval_); } if (((bitField0_ & 0x00000080) != 0)) { output.writeEnum(8, checksumType_); } if (((bitField0_ & 0x00000100) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 9, keyProviderUri_); } if (((bitField0_ & 0x00000200) != 0)) { output.writeUInt32(10, policyId_); } if (((bitField0_ & 0x00000400) != 0)) { output.writeBool(11, snapshotTrashRootEnabled_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, blockSize_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, 
bytesPerChecksum_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(3, writePacketSize_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(4, replication_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(5, fileBufferSize_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(6, encryptDataTransfer_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(7, trashInterval_); } if (((bitField0_ & 0x00000080) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(8, checksumType_); } if (((bitField0_ & 0x00000100) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(9, keyProviderUri_); } if (((bitField0_ & 0x00000200) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(10, policyId_); } if (((bitField0_ & 0x00000400) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(11, snapshotTrashRootEnabled_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) obj; if (hasBlockSize() != other.hasBlockSize()) return false; if (hasBlockSize()) { if (getBlockSize() != other.getBlockSize()) return false; } if (hasBytesPerChecksum() != other.hasBytesPerChecksum()) return false; if (hasBytesPerChecksum()) { if (getBytesPerChecksum() != other.getBytesPerChecksum()) return false; } if (hasWritePacketSize() != other.hasWritePacketSize()) return false; if (hasWritePacketSize()) { if (getWritePacketSize() != other.getWritePacketSize()) return false; } if (hasReplication() != other.hasReplication()) return false; if (hasReplication()) { if (getReplication() != other.getReplication()) return false; } if (hasFileBufferSize() != other.hasFileBufferSize()) return false; if (hasFileBufferSize()) { if (getFileBufferSize() != other.getFileBufferSize()) return false; } if (hasEncryptDataTransfer() != other.hasEncryptDataTransfer()) return false; if (hasEncryptDataTransfer()) { if (getEncryptDataTransfer() != other.getEncryptDataTransfer()) return false; } if (hasTrashInterval() != other.hasTrashInterval()) return false; if (hasTrashInterval()) { if (getTrashInterval() != other.getTrashInterval()) return false; } if (hasChecksumType() != other.hasChecksumType()) return false; if (hasChecksumType()) { if (checksumType_ != other.checksumType_) return false; } if (hasKeyProviderUri() != other.hasKeyProviderUri()) return false; if (hasKeyProviderUri()) { if (!getKeyProviderUri() .equals(other.getKeyProviderUri())) return false; } if (hasPolicyId() != other.hasPolicyId()) return false; if (hasPolicyId()) { if (getPolicyId() != other.getPolicyId()) return false; } if (hasSnapshotTrashRootEnabled() != other.hasSnapshotTrashRootEnabled()) return false; if (hasSnapshotTrashRootEnabled()) { if 
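  // -------------------------------------------------------------------------
  // Editor's note -- not part of the protoc-generated source. The methods
  // around this point all key off bitField0_: each singular proto2 field has
  // one presence bit, and writeTo()/getSerializedSize() only emit fields whose
  // bit is set. A minimal sketch of how that presence API looks to a caller
  // (the variable name `defaults` is illustrative):
  //
  //   HdfsProtos.FsServerDefaultsProto defaults = ...;  // parsed or built elsewhere
  //   boolean explicit = defaults.hasTrashInterval();   // presence bit from bitField0_
  //   long interval    = defaults.getTrashInterval();   // set value, or the declared default (0)
  //
  // An unset optional field still has a readable value (its declared default),
  // but it is neither serialized nor counted toward the serialized size.
  // -------------------------------------------------------------------------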
(getSnapshotTrashRootEnabled() != other.getSnapshotTrashRootEnabled()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasBlockSize()) { hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getBlockSize()); } if (hasBytesPerChecksum()) { hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER; hash = (53 * hash) + getBytesPerChecksum(); } if (hasWritePacketSize()) { hash = (37 * hash) + WRITEPACKETSIZE_FIELD_NUMBER; hash = (53 * hash) + getWritePacketSize(); } if (hasReplication()) { hash = (37 * hash) + REPLICATION_FIELD_NUMBER; hash = (53 * hash) + getReplication(); } if (hasFileBufferSize()) { hash = (37 * hash) + FILEBUFFERSIZE_FIELD_NUMBER; hash = (53 * hash) + getFileBufferSize(); } if (hasEncryptDataTransfer()) { hash = (37 * hash) + ENCRYPTDATATRANSFER_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getEncryptDataTransfer()); } if (hasTrashInterval()) { hash = (37 * hash) + TRASHINTERVAL_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getTrashInterval()); } if (hasChecksumType()) { hash = (37 * hash) + CHECKSUMTYPE_FIELD_NUMBER; hash = (53 * hash) + checksumType_; } if (hasKeyProviderUri()) { hash = (37 * hash) + KEYPROVIDERURI_FIELD_NUMBER; hash = (53 * hash) + getKeyProviderUri().hashCode(); } if (hasPolicyId()) { hash = (37 * hash) + POLICYID_FIELD_NUMBER; hash = (53 * hash) + getPolicyId(); } if (hasSnapshotTrashRootEnabled()) { hash = (37 * hash) + SNAPSHOTTRASHROOTENABLED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getSnapshotTrashRootEnabled()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( byte[] data, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * HDFS Server Defaults
     * 
* * Protobuf type {@code hadoop.hdfs.FsServerDefaultsProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.FsServerDefaultsProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; blockSize_ = 0L; bytesPerChecksum_ = 0; writePacketSize_ = 0; replication_ = 0; fileBufferSize_ = 0; encryptDataTransfer_ = false; trashInterval_ = 0L; checksumType_ = 1; keyProviderUri_ = ""; policyId_ = 0; snapshotTrashRootEnabled_ = false; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.blockSize_ = blockSize_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.bytesPerChecksum_ = bytesPerChecksum_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.writePacketSize_ = writePacketSize_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.replication_ = replication_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.fileBufferSize_ = fileBufferSize_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.encryptDataTransfer_ = 
encryptDataTransfer_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { result.trashInterval_ = trashInterval_; to_bitField0_ |= 0x00000040; } if (((from_bitField0_ & 0x00000080) != 0)) { result.checksumType_ = checksumType_; to_bitField0_ |= 0x00000080; } if (((from_bitField0_ & 0x00000100) != 0)) { result.keyProviderUri_ = keyProviderUri_; to_bitField0_ |= 0x00000100; } if (((from_bitField0_ & 0x00000200) != 0)) { result.policyId_ = policyId_; to_bitField0_ |= 0x00000200; } if (((from_bitField0_ & 0x00000400) != 0)) { result.snapshotTrashRootEnabled_ = snapshotTrashRootEnabled_; to_bitField0_ |= 0x00000400; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance()) return this; if (other.hasBlockSize()) { setBlockSize(other.getBlockSize()); } if (other.hasBytesPerChecksum()) { setBytesPerChecksum(other.getBytesPerChecksum()); } if (other.hasWritePacketSize()) { setWritePacketSize(other.getWritePacketSize()); } if (other.hasReplication()) { setReplication(other.getReplication()); } if (other.hasFileBufferSize()) { setFileBufferSize(other.getFileBufferSize()); } if (other.hasEncryptDataTransfer()) { setEncryptDataTransfer(other.getEncryptDataTransfer()); } if (other.hasTrashInterval()) { setTrashInterval(other.getTrashInterval()); } if (other.hasChecksumType()) { setChecksumType(other.getChecksumType()); } if (other.hasKeyProviderUri()) { keyProviderUri_ = other.keyProviderUri_; bitField0_ |= 0x00000100; onChanged(); } if (other.hasPolicyId()) { setPolicyId(other.getPolicyId()); } if (other.hasSnapshotTrashRootEnabled()) { setSnapshotTrashRootEnabled(other.getSnapshotTrashRootEnabled()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasBlockSize()) { return false; } if (!hasBytesPerChecksum()) { return false; } if (!hasWritePacketSize()) { return false; } if (!hasReplication()) { return false; } if (!hasFileBufferSize()) { return false; } return true; } @java.lang.Override public 
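  // -------------------------------------------------------------------------
  // Editor's note -- illustrative sketch, not generated code. isInitialized()
  // above mirrors the five `required` fields of FsServerDefaultsProto
  // (blockSize, bytesPerChecksum, writePacketSize, replication,
  // fileBufferSize). build() enforces that check, buildPartial() does not:
  //
  //   HdfsProtos.FsServerDefaultsProto.Builder b =
  //       HdfsProtos.FsServerDefaultsProto.newBuilder()
  //           .setBlockSize(134217728L);   // only 1 of 5 required fields set
  //   b.isInitialized();                   // false
  //   b.buildPartial();                    // returns the incomplete message
  //   b.build();                           // throws UninitializedMessageException
  //
  // The 134217728L value is just an example figure, not a default taken from
  // this file.
  // -------------------------------------------------------------------------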
Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { blockSize_ = input.readUInt64(); bitField0_ |= 0x00000001; break; } // case 8 case 16: { bytesPerChecksum_ = input.readUInt32(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { writePacketSize_ = input.readUInt32(); bitField0_ |= 0x00000004; break; } // case 24 case 32: { replication_ = input.readUInt32(); bitField0_ |= 0x00000008; break; } // case 32 case 40: { fileBufferSize_ = input.readUInt32(); bitField0_ |= 0x00000010; break; } // case 40 case 48: { encryptDataTransfer_ = input.readBool(); bitField0_ |= 0x00000020; break; } // case 48 case 56: { trashInterval_ = input.readUInt64(); bitField0_ |= 0x00000040; break; } // case 56 case 64: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(8, tmpRaw); } else { checksumType_ = tmpRaw; bitField0_ |= 0x00000080; } break; } // case 64 case 74: { keyProviderUri_ = input.readBytes(); bitField0_ |= 0x00000100; break; } // case 74 case 80: { policyId_ = input.readUInt32(); bitField0_ |= 0x00000200; break; } // case 80 case 88: { snapshotTrashRootEnabled_ = input.readBool(); bitField0_ |= 0x00000400; break; } // case 88 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private long blockSize_ ; /** * required uint64 blockSize = 1; * @return Whether the blockSize field is set. */ @java.lang.Override public boolean hasBlockSize() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 blockSize = 1; * @return The blockSize. */ @java.lang.Override public long getBlockSize() { return blockSize_; } /** * required uint64 blockSize = 1; * @param value The blockSize to set. * @return This builder for chaining. */ public Builder setBlockSize(long value) { blockSize_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required uint64 blockSize = 1; * @return This builder for chaining. */ public Builder clearBlockSize() { bitField0_ = (bitField0_ & ~0x00000001); blockSize_ = 0L; onChanged(); return this; } private int bytesPerChecksum_ ; /** * required uint32 bytesPerChecksum = 2; * @return Whether the bytesPerChecksum field is set. */ @java.lang.Override public boolean hasBytesPerChecksum() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 bytesPerChecksum = 2; * @return The bytesPerChecksum. */ @java.lang.Override public int getBytesPerChecksum() { return bytesPerChecksum_; } /** * required uint32 bytesPerChecksum = 2; * @param value The bytesPerChecksum to set. * @return This builder for chaining. */ public Builder setBytesPerChecksum(int value) { bytesPerChecksum_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required uint32 bytesPerChecksum = 2; * @return This builder for chaining. 
*/ public Builder clearBytesPerChecksum() { bitField0_ = (bitField0_ & ~0x00000002); bytesPerChecksum_ = 0; onChanged(); return this; } private int writePacketSize_ ; /** * required uint32 writePacketSize = 3; * @return Whether the writePacketSize field is set. */ @java.lang.Override public boolean hasWritePacketSize() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint32 writePacketSize = 3; * @return The writePacketSize. */ @java.lang.Override public int getWritePacketSize() { return writePacketSize_; } /** * required uint32 writePacketSize = 3; * @param value The writePacketSize to set. * @return This builder for chaining. */ public Builder setWritePacketSize(int value) { writePacketSize_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required uint32 writePacketSize = 3; * @return This builder for chaining. */ public Builder clearWritePacketSize() { bitField0_ = (bitField0_ & ~0x00000004); writePacketSize_ = 0; onChanged(); return this; } private int replication_ ; /** *
       * Actually a short - only 16 bits used
       * 
* * required uint32 replication = 4; * @return Whether the replication field is set. */ @java.lang.Override public boolean hasReplication() { return ((bitField0_ & 0x00000008) != 0); } /** *
       * Actually a short - only 16 bits used
       * 
* * required uint32 replication = 4; * @return The replication. */ @java.lang.Override public int getReplication() { return replication_; } /** *
       * Actually a short - only 16 bits used
       * 
* * required uint32 replication = 4; * @param value The replication to set. * @return This builder for chaining. */ public Builder setReplication(int value) { replication_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** *
       * Actually a short - only 16 bits used
       * 
* * required uint32 replication = 4; * @return This builder for chaining. */ public Builder clearReplication() { bitField0_ = (bitField0_ & ~0x00000008); replication_ = 0; onChanged(); return this; } private int fileBufferSize_ ; /** * required uint32 fileBufferSize = 5; * @return Whether the fileBufferSize field is set. */ @java.lang.Override public boolean hasFileBufferSize() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint32 fileBufferSize = 5; * @return The fileBufferSize. */ @java.lang.Override public int getFileBufferSize() { return fileBufferSize_; } /** * required uint32 fileBufferSize = 5; * @param value The fileBufferSize to set. * @return This builder for chaining. */ public Builder setFileBufferSize(int value) { fileBufferSize_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * required uint32 fileBufferSize = 5; * @return This builder for chaining. */ public Builder clearFileBufferSize() { bitField0_ = (bitField0_ & ~0x00000010); fileBufferSize_ = 0; onChanged(); return this; } private boolean encryptDataTransfer_ ; /** * optional bool encryptDataTransfer = 6 [default = false]; * @return Whether the encryptDataTransfer field is set. */ @java.lang.Override public boolean hasEncryptDataTransfer() { return ((bitField0_ & 0x00000020) != 0); } /** * optional bool encryptDataTransfer = 6 [default = false]; * @return The encryptDataTransfer. */ @java.lang.Override public boolean getEncryptDataTransfer() { return encryptDataTransfer_; } /** * optional bool encryptDataTransfer = 6 [default = false]; * @param value The encryptDataTransfer to set. * @return This builder for chaining. */ public Builder setEncryptDataTransfer(boolean value) { encryptDataTransfer_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } /** * optional bool encryptDataTransfer = 6 [default = false]; * @return This builder for chaining. */ public Builder clearEncryptDataTransfer() { bitField0_ = (bitField0_ & ~0x00000020); encryptDataTransfer_ = false; onChanged(); return this; } private long trashInterval_ ; /** * optional uint64 trashInterval = 7 [default = 0]; * @return Whether the trashInterval field is set. */ @java.lang.Override public boolean hasTrashInterval() { return ((bitField0_ & 0x00000040) != 0); } /** * optional uint64 trashInterval = 7 [default = 0]; * @return The trashInterval. */ @java.lang.Override public long getTrashInterval() { return trashInterval_; } /** * optional uint64 trashInterval = 7 [default = 0]; * @param value The trashInterval to set. * @return This builder for chaining. */ public Builder setTrashInterval(long value) { trashInterval_ = value; bitField0_ |= 0x00000040; onChanged(); return this; } /** * optional uint64 trashInterval = 7 [default = 0]; * @return This builder for chaining. */ public Builder clearTrashInterval() { bitField0_ = (bitField0_ & ~0x00000040); trashInterval_ = 0L; onChanged(); return this; } private int checksumType_ = 1; /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; * @return Whether the checksumType field is set. */ @java.lang.Override public boolean hasChecksumType() { return ((bitField0_ & 0x00000080) != 0); } /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; * @return The checksumType. 
*/ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(checksumType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32 : result; } /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; * @param value The checksumType to set. * @return This builder for chaining. */ public Builder setChecksumType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000080; checksumType_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; * @return This builder for chaining. */ public Builder clearChecksumType() { bitField0_ = (bitField0_ & ~0x00000080); checksumType_ = 1; onChanged(); return this; } private java.lang.Object keyProviderUri_ = ""; /** * optional string keyProviderUri = 9; * @return Whether the keyProviderUri field is set. */ public boolean hasKeyProviderUri() { return ((bitField0_ & 0x00000100) != 0); } /** * optional string keyProviderUri = 9; * @return The keyProviderUri. */ public java.lang.String getKeyProviderUri() { java.lang.Object ref = keyProviderUri_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyProviderUri_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string keyProviderUri = 9; * @return The bytes for keyProviderUri. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyProviderUriBytes() { java.lang.Object ref = keyProviderUri_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyProviderUri_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string keyProviderUri = 9; * @param value The keyProviderUri to set. * @return This builder for chaining. */ public Builder setKeyProviderUri( java.lang.String value) { if (value == null) { throw new NullPointerException(); } keyProviderUri_ = value; bitField0_ |= 0x00000100; onChanged(); return this; } /** * optional string keyProviderUri = 9; * @return This builder for chaining. */ public Builder clearKeyProviderUri() { keyProviderUri_ = getDefaultInstance().getKeyProviderUri(); bitField0_ = (bitField0_ & ~0x00000100); onChanged(); return this; } /** * optional string keyProviderUri = 9; * @param value The bytes for keyProviderUri to set. * @return This builder for chaining. */ public Builder setKeyProviderUriBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } keyProviderUri_ = value; bitField0_ |= 0x00000100; onChanged(); return this; } private int policyId_ ; /** * optional uint32 policyId = 10 [default = 0]; * @return Whether the policyId field is set. */ @java.lang.Override public boolean hasPolicyId() { return ((bitField0_ & 0x00000200) != 0); } /** * optional uint32 policyId = 10 [default = 0]; * @return The policyId. 
*/ @java.lang.Override public int getPolicyId() { return policyId_; } /** * optional uint32 policyId = 10 [default = 0]; * @param value The policyId to set. * @return This builder for chaining. */ public Builder setPolicyId(int value) { policyId_ = value; bitField0_ |= 0x00000200; onChanged(); return this; } /** * optional uint32 policyId = 10 [default = 0]; * @return This builder for chaining. */ public Builder clearPolicyId() { bitField0_ = (bitField0_ & ~0x00000200); policyId_ = 0; onChanged(); return this; } private boolean snapshotTrashRootEnabled_ ; /** * optional bool snapshotTrashRootEnabled = 11 [default = false]; * @return Whether the snapshotTrashRootEnabled field is set. */ @java.lang.Override public boolean hasSnapshotTrashRootEnabled() { return ((bitField0_ & 0x00000400) != 0); } /** * optional bool snapshotTrashRootEnabled = 11 [default = false]; * @return The snapshotTrashRootEnabled. */ @java.lang.Override public boolean getSnapshotTrashRootEnabled() { return snapshotTrashRootEnabled_; } /** * optional bool snapshotTrashRootEnabled = 11 [default = false]; * @param value The snapshotTrashRootEnabled to set. * @return This builder for chaining. */ public Builder setSnapshotTrashRootEnabled(boolean value) { snapshotTrashRootEnabled_ = value; bitField0_ |= 0x00000400; onChanged(); return this; } /** * optional bool snapshotTrashRootEnabled = 11 [default = false]; * @return This builder for chaining. */ public Builder clearSnapshotTrashRootEnabled() { bitField0_ = (bitField0_ & ~0x00000400); snapshotTrashRootEnabled_ = false; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FsServerDefaultsProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.FsServerDefaultsProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public FsServerDefaultsProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static 
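  // -------------------------------------------------------------------------
  // Editor's usage sketch (not part of the generated file). A hypothetical
  // round trip through the FsServerDefaultsProto API defined above; every
  // literal below is an illustrative assumption, not an HDFS default:
  //
  //   HdfsProtos.FsServerDefaultsProto defaults =
  //       HdfsProtos.FsServerDefaultsProto.newBuilder()
  //           .setBlockSize(134217728L)        // required, field 1
  //           .setBytesPerChecksum(512)        // required, field 2
  //           .setWritePacketSize(65536)       // required, field 3
  //           .setReplication(3)               // required, field 4 (fits in 16 bits)
  //           .setFileBufferSize(4096)         // required, field 5
  //           .setChecksumType(
  //               HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32)  // optional, field 8
  //           .build();
  //
  //   byte[] wire = defaults.toByteArray();
  //   HdfsProtos.FsServerDefaultsProto parsed =
  //       HdfsProtos.FsServerDefaultsProto.parseFrom(wire);  // may throw
  //                                                          // InvalidProtocolBufferException
  //
  // The public PARSER field above is marked deprecated; the static
  // parseFrom(...) overloads and parser() are the non-deprecated entry points.
  // -------------------------------------------------------------------------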
org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DirectoryListingProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DirectoryListingProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ java.util.List getPartialListingList(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ int getPartialListingCount(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ java.util.List getPartialListingOrBuilderList(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index); /** * required uint32 remainingEntries = 2; * @return Whether the remainingEntries field is set. */ boolean hasRemainingEntries(); /** * required uint32 remainingEntries = 2; * @return The remainingEntries. */ int getRemainingEntries(); } /** *
   **
   * Directory listing
   * 
* * Protobuf type {@code hadoop.hdfs.DirectoryListingProto} */ public static final class DirectoryListingProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DirectoryListingProto) DirectoryListingProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DirectoryListingProto.newBuilder() to construct. private DirectoryListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DirectoryListingProto() { partialListing_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new DirectoryListingProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class); } private int bitField0_; public static final int PARTIALLISTING_FIELD_NUMBER = 1; @SuppressWarnings("serial") private java.util.List partialListing_; /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ @java.lang.Override public java.util.List getPartialListingList() { return partialListing_; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ @java.lang.Override public java.util.List getPartialListingOrBuilderList() { return partialListing_; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ @java.lang.Override public int getPartialListingCount() { return partialListing_.size(); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) { return partialListing_.get(index); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index) { return partialListing_.get(index); } public static final int REMAININGENTRIES_FIELD_NUMBER = 2; private int remainingEntries_ = 0; /** * required uint32 remainingEntries = 2; * @return Whether the remainingEntries field is set. */ @java.lang.Override public boolean hasRemainingEntries() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint32 remainingEntries = 2; * @return The remainingEntries. 
*/ @java.lang.Override public int getRemainingEntries() { return remainingEntries_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasRemainingEntries()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getPartialListingCount(); i++) { if (!getPartialListing(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < partialListing_.size(); i++) { output.writeMessage(1, partialListing_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(2, remainingEntries_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < partialListing_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, partialListing_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, remainingEntries_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) obj; if (!getPartialListingList() .equals(other.getPartialListingList())) return false; if (hasRemainingEntries() != other.hasRemainingEntries()) return false; if (hasRemainingEntries()) { if (getRemainingEntries() != other.getRemainingEntries()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getPartialListingCount() > 0) { hash = (37 * hash) + PARTIALLISTING_FIELD_NUMBER; hash = (53 * hash) + getPartialListingList().hashCode(); } if (hasRemainingEntries()) { hash = (37 * hash) + REMAININGENTRIES_FIELD_NUMBER; hash = (53 * hash) + getRemainingEntries(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
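  // -------------------------------------------------------------------------
  // Editor's note -- illustrative sketch, not generated code. A directory
  // listing is returned in pages: partialListing carries one batch of
  // HdfsFileStatusProto entries and remainingEntries is the number of entries
  // the server still holds beyond this batch. A hypothetical consumer
  // (`listing` is assumed to come from an RPC response, process(...) is a
  // placeholder):
  //
  //   HdfsProtos.DirectoryListingProto listing = ...;
  //   for (HdfsProtos.HdfsFileStatusProto status : listing.getPartialListingList()) {
  //     process(status);
  //   }
  //   boolean morePages = listing.getRemainingEntries() > 0;  // fetch the next batch if true
  // -------------------------------------------------------------------------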
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Directory listing
     * 
* * Protobuf type {@code hadoop.hdfs.DirectoryListingProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DirectoryListingProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; if (partialListingBuilder_ == null) { partialListing_ = java.util.Collections.emptyList(); } else { partialListing_ = null; partialListingBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); remainingEntries_ = 0; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result) { if (partialListingBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { partialListing_ = java.util.Collections.unmodifiableList(partialListing_); bitField0_ = (bitField0_ & ~0x00000001); } result.partialListing_ = partialListing_; } else { result.partialListing_ = partialListingBuilder_.build(); } } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000002) != 0)) { result.remainingEntries_ = remainingEntries_; to_bitField0_ |= 0x00000001; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return 
super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance()) return this; if (partialListingBuilder_ == null) { if (!other.partialListing_.isEmpty()) { if (partialListing_.isEmpty()) { partialListing_ = other.partialListing_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensurePartialListingIsMutable(); partialListing_.addAll(other.partialListing_); } onChanged(); } } else { if (!other.partialListing_.isEmpty()) { if (partialListingBuilder_.isEmpty()) { partialListingBuilder_.dispose(); partialListingBuilder_ = null; partialListing_ = other.partialListing_; bitField0_ = (bitField0_ & ~0x00000001); partialListingBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getPartialListingFieldBuilder() : null; } else { partialListingBuilder_.addAllMessages(other.partialListing_); } } } if (other.hasRemainingEntries()) { setRemainingEntries(other.getRemainingEntries()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasRemainingEntries()) { return false; } for (int i = 0; i < getPartialListingCount(); i++) { if (!getPartialListing(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry); if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.add(m); } else { partialListingBuilder_.addMessage(m); } break; } // case 10 case 16: { remainingEntries_ = input.readUInt32(); bitField0_ |= 0x00000002; break; } // case 16 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.util.List partialListing_ = java.util.Collections.emptyList(); private void ensurePartialListingIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { partialListing_ = new java.util.ArrayList(partialListing_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> partialListingBuilder_; /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingList() { if (partialListingBuilder_ == null) { return java.util.Collections.unmodifiableList(partialListing_); } else { return partialListingBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public int getPartialListingCount() { if (partialListingBuilder_ == null) { return partialListing_.size(); } else { return partialListingBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) { if (partialListingBuilder_ == null) { return partialListing_.get(index); } else { return partialListingBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder setPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.set(index, value); onChanged(); } else { 
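  // -------------------------------------------------------------------------
  // Editor's note -- illustrative sketch, not generated code. Populating the
  // repeated partialListing field through this builder; `statuses` is an
  // assumed, already-built list of HdfsFileStatusProto messages:
  //
  //   java.util.List<HdfsProtos.HdfsFileStatusProto> statuses = ...;
  //   HdfsProtos.DirectoryListingProto page =
  //       HdfsProtos.DirectoryListingProto.newBuilder()
  //           .addAllPartialListing(statuses)   // or addPartialListing(one) per entry
  //           .setRemainingEntries(0)           // required field 2
  //           .build();
  // -------------------------------------------------------------------------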
partialListingBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder setPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.set(index, builderForValue.build()); onChanged(); } else { partialListingBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.add(value); onChanged(); } else { partialListingBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.add(index, value); onChanged(); } else { partialListingBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.add(builderForValue.build()); onChanged(); } else { partialListingBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.add(index, builderForValue.build()); onChanged(); } else { partialListingBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addAllPartialListing( java.lang.Iterable values) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, partialListing_); onChanged(); } else { partialListingBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder clearPartialListing() { if (partialListingBuilder_ == null) { partialListing_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { partialListingBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder removePartialListing(int index) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.remove(index); onChanged(); } else { partialListingBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getPartialListingBuilder( int index) { return getPartialListingFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing 
= 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index) { if (partialListingBuilder_ == null) { return partialListing_.get(index); } else { return partialListingBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingOrBuilderList() { if (partialListingBuilder_ != null) { return partialListingBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(partialListing_); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder() { return getPartialListingFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder( int index) { return getPartialListingFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingBuilderList() { return getPartialListingFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getPartialListingFieldBuilder() { if (partialListingBuilder_ == null) { partialListingBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( partialListing_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); partialListing_ = null; } return partialListingBuilder_; } private int remainingEntries_ ; /** * required uint32 remainingEntries = 2; * @return Whether the remainingEntries field is set. */ @java.lang.Override public boolean hasRemainingEntries() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 remainingEntries = 2; * @return The remainingEntries. */ @java.lang.Override public int getRemainingEntries() { return remainingEntries_; } /** * required uint32 remainingEntries = 2; * @param value The remainingEntries to set. * @return This builder for chaining. */ public Builder setRemainingEntries(int value) { remainingEntries_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required uint32 remainingEntries = 2; * @return This builder for chaining. 
*/ public Builder clearRemainingEntries() { bitField0_ = (bitField0_ & ~0x00000002); remainingEntries_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DirectoryListingProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DirectoryListingProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DirectoryListingProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface RemoteExceptionProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.RemoteExceptionProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string className = 1; * @return Whether the className field is set. */ boolean hasClassName(); /** * required string className = 1; * @return The className. */ java.lang.String getClassName(); /** * required string className = 1; * @return The bytes for className. */ org.apache.hadoop.thirdparty.protobuf.ByteString getClassNameBytes(); /** * optional string message = 2; * @return Whether the message field is set. */ boolean hasMessage(); /** * optional string message = 2; * @return The message. */ java.lang.String getMessage(); /** * optional string message = 2; * @return The bytes for message. 
*/ org.apache.hadoop.thirdparty.protobuf.ByteString getMessageBytes(); } /** * Protobuf type {@code hadoop.hdfs.RemoteExceptionProto} */ public static final class RemoteExceptionProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.RemoteExceptionProto) RemoteExceptionProtoOrBuilder { private static final long serialVersionUID = 0L; // Use RemoteExceptionProto.newBuilder() to construct. private RemoteExceptionProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private RemoteExceptionProto() { className_ = ""; message_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new RemoteExceptionProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder.class); } private int bitField0_; public static final int CLASSNAME_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object className_ = ""; /** * required string className = 1; * @return Whether the className field is set. */ @java.lang.Override public boolean hasClassName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string className = 1; * @return The className. */ @java.lang.Override public java.lang.String getClassName() { java.lang.Object ref = className_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { className_ = s; } return s; } } /** * required string className = 1; * @return The bytes for className. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getClassNameBytes() { java.lang.Object ref = className_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); className_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int MESSAGE_FIELD_NUMBER = 2; @SuppressWarnings("serial") private volatile java.lang.Object message_ = ""; /** * optional string message = 2; * @return Whether the message field is set. */ @java.lang.Override public boolean hasMessage() { return ((bitField0_ & 0x00000002) != 0); } /** * optional string message = 2; * @return The message. 
*/ @java.lang.Override public java.lang.String getMessage() { java.lang.Object ref = message_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { message_ = s; } return s; } } /** * optional string message = 2; * @return The bytes for message. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getMessageBytes() { java.lang.Object ref = message_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); message_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasClassName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, className_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, message_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, className_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, message_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto) obj; if (hasClassName() != other.hasClassName()) return false; if (hasClassName()) { if (!getClassName() .equals(other.getClassName())) return false; } if (hasMessage() != other.hasMessage()) return false; if (hasMessage()) { if (!getMessage() .equals(other.getMessage())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasClassName()) { hash = (37 * hash) + CLASSNAME_FIELD_NUMBER; hash = (53 * hash) + getClassName().hashCode(); } if (hasMessage()) { hash = (37 * hash) + MESSAGE_FIELD_NUMBER; hash = (53 * hash) + getMessage().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() 
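/*
 * Usage sketch (hand-written note, not protoc output): a minimal round trip for
 * RemoteExceptionProto. className (field 1) is the only required field and message (field 2)
 * is optional; toByteArray() comes from the protobuf MessageLite API, the remaining calls are
 * the generated accessors and parsers declared in this file.
 *
 *   byte[] wire = HdfsProtos.RemoteExceptionProto.newBuilder()
 *       .setClassName("java.io.FileNotFoundException")    // required string className = 1
 *       .setMessage("no such file or directory")          // optional string message = 2
 *       .build()
 *       .toByteArray();
 *
 *   HdfsProtos.RemoteExceptionProto parsed =
 *       HdfsProtos.RemoteExceptionProto.parseFrom(wire);  // InvalidProtocolBufferException on bad input
 *   boolean hasMsg = parsed.hasMessage();                 // true here; false if message was never set
 */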
{ return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RemoteExceptionProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.RemoteExceptionProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; className_ = ""; message_ = ""; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.className_ = className_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.message_ = message_; to_bitField0_ |= 0x00000002; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public 
Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance()) return this; if (other.hasClassName()) { className_ = other.className_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasMessage()) { message_ = other.message_; bitField0_ |= 0x00000002; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasClassName()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { className_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 18: { message_ = input.readBytes(); bitField0_ |= 0x00000002; break; } // case 18 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object className_ = ""; /** * required string className = 1; * @return Whether the className field is set. */ public boolean hasClassName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string className = 1; * @return The className. */ public java.lang.String getClassName() { java.lang.Object ref = className_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { className_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string className = 1; * @return The bytes for className. 
*/ public org.apache.hadoop.thirdparty.protobuf.ByteString getClassNameBytes() { java.lang.Object ref = className_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); className_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string className = 1; * @param value The className to set. * @return This builder for chaining. */ public Builder setClassName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } className_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required string className = 1; * @return This builder for chaining. */ public Builder clearClassName() { className_ = getDefaultInstance().getClassName(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * required string className = 1; * @param value The bytes for className to set. * @return This builder for chaining. */ public Builder setClassNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } className_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private java.lang.Object message_ = ""; /** * optional string message = 2; * @return Whether the message field is set. */ public boolean hasMessage() { return ((bitField0_ & 0x00000002) != 0); } /** * optional string message = 2; * @return The message. */ public java.lang.String getMessage() { java.lang.Object ref = message_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { message_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string message = 2; * @return The bytes for message. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getMessageBytes() { java.lang.Object ref = message_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); message_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string message = 2; * @param value The message to set. * @return This builder for chaining. */ public Builder setMessage( java.lang.String value) { if (value == null) { throw new NullPointerException(); } message_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * optional string message = 2; * @return This builder for chaining. */ public Builder clearMessage() { message_ = getDefaultInstance().getMessage(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } /** * optional string message = 2; * @param value The bytes for message to set. * @return This builder for chaining. 
*/ public Builder setMessageBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } message_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RemoteExceptionProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoteExceptionProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public RemoteExceptionProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface BatchedDirectoryListingProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BatchedDirectoryListingProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ java.util.List getPartialListingList(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ int getPartialListingCount(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ java.util.List getPartialListingOrBuilderList(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index); /** * required uint32 parentIdx = 2; * @return Whether the parentIdx field is set. */ boolean hasParentIdx(); /** * required uint32 parentIdx = 2; * @return The parentIdx. 
*/ int getParentIdx(); /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; * @return Whether the exception field is set. */ boolean hasException(); /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; * @return The exception. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getException(); /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder getExceptionOrBuilder(); } /** *
   * <pre>
   * Directory listing result for a batched listing call.
   * </pre>
* * Protobuf type {@code hadoop.hdfs.BatchedDirectoryListingProto} */ public static final class BatchedDirectoryListingProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.BatchedDirectoryListingProto) BatchedDirectoryListingProtoOrBuilder { private static final long serialVersionUID = 0L; // Use BatchedDirectoryListingProto.newBuilder() to construct. private BatchedDirectoryListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private BatchedDirectoryListingProto() { partialListing_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new BatchedDirectoryListingProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.Builder.class); } private int bitField0_; public static final int PARTIALLISTING_FIELD_NUMBER = 1; @SuppressWarnings("serial") private java.util.List partialListing_; /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ @java.lang.Override public java.util.List getPartialListingList() { return partialListing_; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ @java.lang.Override public java.util.List getPartialListingOrBuilderList() { return partialListing_; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ @java.lang.Override public int getPartialListingCount() { return partialListing_.size(); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) { return partialListing_.get(index); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index) { return partialListing_.get(index); } public static final int PARENTIDX_FIELD_NUMBER = 2; private int parentIdx_ = 0; /** * required uint32 parentIdx = 2; * @return Whether the parentIdx field is set. */ @java.lang.Override public boolean hasParentIdx() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint32 parentIdx = 2; * @return The parentIdx. */ @java.lang.Override public int getParentIdx() { return parentIdx_; } public static final int EXCEPTION_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto exception_; /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; * @return Whether the exception field is set. 
*/ @java.lang.Override public boolean hasException() { return ((bitField0_ & 0x00000002) != 0); } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; * @return The exception. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getException() { return exception_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance() : exception_; } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder getExceptionOrBuilder() { return exception_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance() : exception_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasParentIdx()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getPartialListingCount(); i++) { if (!getPartialListing(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasException()) { if (!getException().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < partialListing_.size(); i++) { output.writeMessage(1, partialListing_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(2, parentIdx_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(3, getException()); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < partialListing_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, partialListing_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, parentIdx_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(3, getException()); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto) obj; if (!getPartialListingList() .equals(other.getPartialListingList())) return false; if (hasParentIdx() != other.hasParentIdx()) return false; if (hasParentIdx()) { if (getParentIdx() != other.getParentIdx()) return false; } if (hasException() != other.hasException()) return false; if (hasException()) { if (!getException() .equals(other.getException())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getPartialListingCount() > 0) { hash = (37 * hash) + PARTIALLISTING_FIELD_NUMBER; hash = 
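/*
 * Usage sketch (hand-written note, not protoc output): the surrounding equals()/hashCode()
 * are value-based and presence-aware: two messages compare equal only when the same fields
 * are set, those fields hold equal values, and their unknown-field sets match.
 *
 *   HdfsProtos.BatchedDirectoryListingProto a =
 *       HdfsProtos.BatchedDirectoryListingProto.newBuilder().setParentIdx(1).build();
 *   HdfsProtos.BatchedDirectoryListingProto b =
 *       HdfsProtos.BatchedDirectoryListingProto.newBuilder().setParentIdx(1).build();
 *   boolean same = a.equals(b) && a.hashCode() == b.hashCode();   // true: same presence, same values
 */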
(53 * hash) + getPartialListingList().hashCode(); } if (hasParentIdx()) { hash = (37 * hash) + PARENTIDX_FIELD_NUMBER; hash = (53 * hash) + getParentIdx(); } if (hasException()) { hash = (37 * hash) + EXCEPTION_FIELD_NUMBER; hash = (53 * hash) + getException().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( 
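/*
 * Usage sketch (hand-written note, not protoc output): parentIdx is the only required scalar
 * on BatchedDirectoryListingProto, so build() throws when it was never set while buildPartial()
 * skips that check (the required-field test is visible in isInitialized() above and in the
 * Builder's build() further down). A short sketch of that contract:
 *
 *   HdfsProtos.BatchedDirectoryListingProto.Builder b =
 *       HdfsProtos.BatchedDirectoryListingProto.newBuilder();
 *   boolean ready = b.buildPartial().isInitialized();   // false: required parentIdx is missing
 *   HdfsProtos.BatchedDirectoryListingProto msg =
 *       b.setParentIdx(0).build();                      // succeeds once the required field is set
 */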
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     * <pre>
     * Directory listing result for a batched listing call.
     * </pre>
* * Protobuf type {@code hadoop.hdfs.BatchedDirectoryListingProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BatchedDirectoryListingProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getPartialListingFieldBuilder(); getExceptionFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; if (partialListingBuilder_ == null) { partialListing_ = java.util.Collections.emptyList(); } else { partialListing_ = null; partialListingBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); parentIdx_ = 0; exception_ = null; if (exceptionBuilder_ != null) { exceptionBuilder_.dispose(); exceptionBuilder_ = null; } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto result) { if (partialListingBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { partialListing_ = java.util.Collections.unmodifiableList(partialListing_); bitField0_ = (bitField0_ & ~0x00000001); } 
result.partialListing_ = partialListing_; } else { result.partialListing_ = partialListingBuilder_.build(); } } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000002) != 0)) { result.parentIdx_ = parentIdx_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000004) != 0)) { result.exception_ = exceptionBuilder_ == null ? exception_ : exceptionBuilder_.build(); to_bitField0_ |= 0x00000002; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.getDefaultInstance()) return this; if (partialListingBuilder_ == null) { if (!other.partialListing_.isEmpty()) { if (partialListing_.isEmpty()) { partialListing_ = other.partialListing_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensurePartialListingIsMutable(); partialListing_.addAll(other.partialListing_); } onChanged(); } } else { if (!other.partialListing_.isEmpty()) { if (partialListingBuilder_.isEmpty()) { partialListingBuilder_.dispose(); partialListingBuilder_ = null; partialListing_ = other.partialListing_; bitField0_ = (bitField0_ & ~0x00000001); partialListingBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
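/*
 * Usage sketch (hand-written note, not protoc output): the surrounding mergeFrom(other) follows
 * the usual proto2 merge rules, all of which are visible in the generated code: entries of the
 * repeated partialListing field are appended, a parentIdx set on `other` overwrites the local
 * value, and a set exception is merged field-by-field via mergeException(). A sketch, assuming
 * `first` and `second` are two already-built, fully initialized messages:
 *
 *   HdfsProtos.BatchedDirectoryListingProto merged =
 *       HdfsProtos.BatchedDirectoryListingProto.newBuilder(first)   // start from a copy of `first`
 *           .mergeFrom(second)                                      // append/overwrite per the rules above
 *           .build();
 */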
getPartialListingFieldBuilder() : null; } else { partialListingBuilder_.addAllMessages(other.partialListing_); } } } if (other.hasParentIdx()) { setParentIdx(other.getParentIdx()); } if (other.hasException()) { mergeException(other.getException()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasParentIdx()) { return false; } for (int i = 0; i < getPartialListingCount(); i++) { if (!getPartialListing(i).isInitialized()) { return false; } } if (hasException()) { if (!getException().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry); if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.add(m); } else { partialListingBuilder_.addMessage(m); } break; } // case 10 case 16: { parentIdx_ = input.readUInt32(); bitField0_ |= 0x00000002; break; } // case 16 case 26: { input.readMessage( getExceptionFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000004; break; } // case 26 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.util.List partialListing_ = java.util.Collections.emptyList(); private void ensurePartialListingIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { partialListing_ = new java.util.ArrayList(partialListing_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> partialListingBuilder_; /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingList() { if (partialListingBuilder_ == null) { return java.util.Collections.unmodifiableList(partialListing_); } else { return partialListingBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public int getPartialListingCount() { if (partialListingBuilder_ == null) { return partialListing_.size(); } else { return partialListingBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) { if (partialListingBuilder_ == null) { return partialListing_.get(index); } else { return partialListingBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder setPartialListing( int index, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.set(index, value); onChanged(); } else { partialListingBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder setPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.set(index, builderForValue.build()); onChanged(); } else { partialListingBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.add(value); onChanged(); } else { partialListingBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.add(index, value); onChanged(); } else { partialListingBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.add(builderForValue.build()); onChanged(); } else { partialListingBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.add(index, builderForValue.build()); onChanged(); } else { partialListingBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addAllPartialListing( java.lang.Iterable values) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, partialListing_); onChanged(); } else { partialListingBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder clearPartialListing() { if (partialListingBuilder_ == null) { partialListing_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { partialListingBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder removePartialListing(int index) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.remove(index); onChanged(); } else { partialListingBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto 
partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getPartialListingBuilder( int index) { return getPartialListingFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index) { if (partialListingBuilder_ == null) { return partialListing_.get(index); } else { return partialListingBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingOrBuilderList() { if (partialListingBuilder_ != null) { return partialListingBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(partialListing_); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder() { return getPartialListingFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder( int index) { return getPartialListingFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingBuilderList() { return getPartialListingFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getPartialListingFieldBuilder() { if (partialListingBuilder_ == null) { partialListingBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( partialListing_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); partialListing_ = null; } return partialListingBuilder_; } private int parentIdx_ ; /** * required uint32 parentIdx = 2; * @return Whether the parentIdx field is set. */ @java.lang.Override public boolean hasParentIdx() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 parentIdx = 2; * @return The parentIdx. */ @java.lang.Override public int getParentIdx() { return parentIdx_; } /** * required uint32 parentIdx = 2; * @param value The parentIdx to set. * @return This builder for chaining. */ public Builder setParentIdx(int value) { parentIdx_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required uint32 parentIdx = 2; * @return This builder for chaining. 
*/ public Builder clearParentIdx() { bitField0_ = (bitField0_ & ~0x00000002); parentIdx_ = 0; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto exception_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder> exceptionBuilder_; /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; * @return Whether the exception field is set. */ public boolean hasException() { return ((bitField0_ & 0x00000004) != 0); } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; * @return The exception. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getException() { if (exceptionBuilder_ == null) { return exception_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance() : exception_; } else { return exceptionBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public Builder setException(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto value) { if (exceptionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } exception_ = value; } else { exceptionBuilder_.setMessage(value); } bitField0_ |= 0x00000004; onChanged(); return this; } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public Builder setException( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder builderForValue) { if (exceptionBuilder_ == null) { exception_ = builderForValue.build(); } else { exceptionBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; onChanged(); return this; } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public Builder mergeException(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto value) { if (exceptionBuilder_ == null) { if (((bitField0_ & 0x00000004) != 0) && exception_ != null && exception_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance()) { getExceptionBuilder().mergeFrom(value); } else { exception_ = value; } } else { exceptionBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; onChanged(); return this; } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public Builder clearException() { bitField0_ = (bitField0_ & ~0x00000004); exception_ = null; if (exceptionBuilder_ != null) { exceptionBuilder_.dispose(); exceptionBuilder_ = null; } onChanged(); return this; } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder getExceptionBuilder() { bitField0_ |= 0x00000004; onChanged(); return getExceptionFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder getExceptionOrBuilder() { if (exceptionBuilder_ != null) { return exceptionBuilder_.getMessageOrBuilder(); } else { return exception_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance() : exception_; } } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder> getExceptionFieldBuilder() { if (exceptionBuilder_ == null) { exceptionBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder>( getException(), getParentForChildren(), isClean()); exception_ = null; } return exceptionBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BatchedDirectoryListingProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BatchedDirectoryListingProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public BatchedDirectoryListingProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshottableDirectoryStatusProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshottableDirectoryStatusProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; * 
@return Whether the dirStatus field is set. */ boolean hasDirStatus(); /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; * @return The dirStatus. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus(); /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder(); /** *
      * Fields specific to a snapshottable directory
     * 
* * required uint32 snapshot_quota = 2; * @return Whether the snapshotQuota field is set. */ boolean hasSnapshotQuota(); /** *
      * Fields specific to a snapshottable directory
     * 
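      * (the snapshot quota is the maximum number of snapshots that may be
      * taken of this directory)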
* * required uint32 snapshot_quota = 2; * @return The snapshotQuota. */ int getSnapshotQuota(); /** * required uint32 snapshot_number = 3; * @return Whether the snapshotNumber field is set. */ boolean hasSnapshotNumber(); /** * required uint32 snapshot_number = 3; * @return The snapshotNumber. */ int getSnapshotNumber(); /** * required bytes parent_fullpath = 4; * @return Whether the parentFullpath field is set. */ boolean hasParentFullpath(); /** * required bytes parent_fullpath = 4; * @return The parentFullpath. */ org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath(); } /** *
   **
   * Status of a snapshottable directory: besides the normal information of
   * a directory status, it also includes the snapshot quota, the number of
   * snapshots taken, and the full path of the parent directory.
   * 
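   *
   * Illustrative builder usage, as a sketch only: someDirStatus and the
   * literal values below are assumed placeholders, not part of this API.
   *
   *   SnapshottableDirectoryStatusProto status =
   *       SnapshottableDirectoryStatusProto.newBuilder()
   *           .setDirStatus(someDirStatus)   // an HdfsFileStatusProto built elsewhere (assumed)
   *           .setSnapshotQuota(65536)       // example value
   *           .setSnapshotNumber(3)          // example value
   *           .setParentFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("/data"))
   *           .build();                      // fails if any required field is unset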
* * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryStatusProto} */ public static final class SnapshottableDirectoryStatusProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshottableDirectoryStatusProto) SnapshottableDirectoryStatusProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshottableDirectoryStatusProto.newBuilder() to construct. private SnapshottableDirectoryStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshottableDirectoryStatusProto() { parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new SnapshottableDirectoryStatusProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder.class); } private int bitField0_; public static final int DIRSTATUS_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_; /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; * @return Whether the dirStatus field is set. */ @java.lang.Override public boolean hasDirStatus() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; * @return The dirStatus. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() { return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() { return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_; } public static final int SNAPSHOT_QUOTA_FIELD_NUMBER = 2; private int snapshotQuota_ = 0; /** *
      * Fields specific to a snapshottable directory
     * 
* * required uint32 snapshot_quota = 2; * @return Whether the snapshotQuota field is set. */ @java.lang.Override public boolean hasSnapshotQuota() { return ((bitField0_ & 0x00000002) != 0); } /** *
      * Fields specific to a snapshottable directory
     * 
* * required uint32 snapshot_quota = 2; * @return The snapshotQuota. */ @java.lang.Override public int getSnapshotQuota() { return snapshotQuota_; } public static final int SNAPSHOT_NUMBER_FIELD_NUMBER = 3; private int snapshotNumber_ = 0; /** * required uint32 snapshot_number = 3; * @return Whether the snapshotNumber field is set. */ @java.lang.Override public boolean hasSnapshotNumber() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint32 snapshot_number = 3; * @return The snapshotNumber. */ @java.lang.Override public int getSnapshotNumber() { return snapshotNumber_; } public static final int PARENT_FULLPATH_FIELD_NUMBER = 4; private org.apache.hadoop.thirdparty.protobuf.ByteString parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes parent_fullpath = 4; * @return Whether the parentFullpath field is set. */ @java.lang.Override public boolean hasParentFullpath() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes parent_fullpath = 4; * @return The parentFullpath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath() { return parentFullpath_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasDirStatus()) { memoizedIsInitialized = 0; return false; } if (!hasSnapshotQuota()) { memoizedIsInitialized = 0; return false; } if (!hasSnapshotNumber()) { memoizedIsInitialized = 0; return false; } if (!hasParentFullpath()) { memoizedIsInitialized = 0; return false; } if (!getDirStatus().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getDirStatus()); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, snapshotQuota_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt32(3, snapshotNumber_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBytes(4, parentFullpath_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getDirStatus()); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, snapshotQuota_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(3, snapshotNumber_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(4, parentFullpath_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto) obj; if (hasDirStatus() != other.hasDirStatus()) return 
false; if (hasDirStatus()) { if (!getDirStatus() .equals(other.getDirStatus())) return false; } if (hasSnapshotQuota() != other.hasSnapshotQuota()) return false; if (hasSnapshotQuota()) { if (getSnapshotQuota() != other.getSnapshotQuota()) return false; } if (hasSnapshotNumber() != other.hasSnapshotNumber()) return false; if (hasSnapshotNumber()) { if (getSnapshotNumber() != other.getSnapshotNumber()) return false; } if (hasParentFullpath() != other.hasParentFullpath()) return false; if (hasParentFullpath()) { if (!getParentFullpath() .equals(other.getParentFullpath())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasDirStatus()) { hash = (37 * hash) + DIRSTATUS_FIELD_NUMBER; hash = (53 * hash) + getDirStatus().hashCode(); } if (hasSnapshotQuota()) { hash = (37 * hash) + SNAPSHOT_QUOTA_FIELD_NUMBER; hash = (53 * hash) + getSnapshotQuota(); } if (hasSnapshotNumber()) { hash = (37 * hash) + SNAPSHOT_NUMBER_FIELD_NUMBER; hash = (53 * hash) + getSnapshotNumber(); } if (hasParentFullpath()) { hash = (37 * hash) + PARENT_FULLPATH_FIELD_NUMBER; hash = (53 * hash) + getParentFullpath().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
      * Status of a snapshottable directory: besides the normal information of
      * a directory status, it also includes the snapshot quota, the number of
      * snapshots taken, and the full path of the parent directory.
     * 
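      *
      * A builder can also be obtained from an existing message via toBuilder();
      * the sketch below assumes an already-parsed instance named existing.
      *
      *   SnapshottableDirectoryStatusProto updated = existing.toBuilder()
      *       .setSnapshotNumber(existing.getSnapshotNumber() + 1)
      *       .build();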
* * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryStatusProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshottableDirectoryStatusProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getDirStatusFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; dirStatus_ = null; if (dirStatusBuilder_ != null) { dirStatusBuilder_.dispose(); dirStatusBuilder_ = null; } snapshotQuota_ = 0; snapshotNumber_ = 0; parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.dirStatus_ = dirStatusBuilder_ == null ? 
dirStatus_ : dirStatusBuilder_.build(); to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.snapshotQuota_ = snapshotQuota_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.snapshotNumber_ = snapshotNumber_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.parentFullpath_ = parentFullpath_; to_bitField0_ |= 0x00000008; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance()) return this; if (other.hasDirStatus()) { mergeDirStatus(other.getDirStatus()); } if (other.hasSnapshotQuota()) { setSnapshotQuota(other.getSnapshotQuota()); } if (other.hasSnapshotNumber()) { setSnapshotNumber(other.getSnapshotNumber()); } if (other.hasParentFullpath()) { setParentFullpath(other.getParentFullpath()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasDirStatus()) { return false; } if (!hasSnapshotQuota()) { return false; } if (!hasSnapshotNumber()) { return false; } if (!hasParentFullpath()) { return false; } if (!getDirStatus().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { input.readMessage( getDirStatusFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000001; break; } // case 10 case 16: { snapshotQuota_ = input.readUInt32(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { snapshotNumber_ = input.readUInt32(); bitField0_ |= 0x00000004; break; } // case 24 case 34: { parentFullpath_ = input.readBytes(); bitField0_ |= 0x00000008; 
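              // The case labels in this switch are protobuf wire tags, (field_number << 3) | wire_type:
              // 10 = field 1 dirStatus (length-delimited message), 16 = field 2 snapshot_quota (varint),
              // 24 = field 3 snapshot_number (varint), 34 = field 4 parent_fullpath (length-delimited bytes).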
break; } // case 34 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> dirStatusBuilder_; /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; * @return Whether the dirStatus field is set. */ public boolean hasDirStatus() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; * @return The dirStatus. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() { if (dirStatusBuilder_ == null) { return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_; } else { return dirStatusBuilder_.getMessage(); } } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder setDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (dirStatusBuilder_ == null) { if (value == null) { throw new NullPointerException(); } dirStatus_ = value; } else { dirStatusBuilder_.setMessage(value); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder setDirStatus( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (dirStatusBuilder_ == null) { dirStatus_ = builderForValue.build(); } else { dirStatusBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder mergeDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (dirStatusBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && dirStatus_ != null && dirStatus_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) { getDirStatusBuilder().mergeFrom(value); } else { dirStatus_ = value; } } else { dirStatusBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder clearDirStatus() { bitField0_ = (bitField0_ & ~0x00000001); dirStatus_ = null; if (dirStatusBuilder_ != null) { dirStatusBuilder_.dispose(); dirStatusBuilder_ = null; } onChanged(); return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getDirStatusBuilder() { bitField0_ |= 0x00000001; onChanged(); return getDirStatusFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() { if (dirStatusBuilder_ != null) { return dirStatusBuilder_.getMessageOrBuilder(); } else { return dirStatus_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_; } } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getDirStatusFieldBuilder() { if (dirStatusBuilder_ == null) { dirStatusBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( getDirStatus(), getParentForChildren(), isClean()); dirStatus_ = null; } return dirStatusBuilder_; } private int snapshotQuota_ ; /** *
        * Fields specific to a snapshottable directory
       * 
* * required uint32 snapshot_quota = 2; * @return Whether the snapshotQuota field is set. */ @java.lang.Override public boolean hasSnapshotQuota() { return ((bitField0_ & 0x00000002) != 0); } /** *
        * Fields specific to a snapshottable directory
       * 
* * required uint32 snapshot_quota = 2; * @return The snapshotQuota. */ @java.lang.Override public int getSnapshotQuota() { return snapshotQuota_; } /** *
        * Fields specific to a snapshottable directory
       * 
* * required uint32 snapshot_quota = 2; * @param value The snapshotQuota to set. * @return This builder for chaining. */ public Builder setSnapshotQuota(int value) { snapshotQuota_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** *
        * Fields specific to a snapshottable directory
       * 
* * required uint32 snapshot_quota = 2; * @return This builder for chaining. */ public Builder clearSnapshotQuota() { bitField0_ = (bitField0_ & ~0x00000002); snapshotQuota_ = 0; onChanged(); return this; } private int snapshotNumber_ ; /** * required uint32 snapshot_number = 3; * @return Whether the snapshotNumber field is set. */ @java.lang.Override public boolean hasSnapshotNumber() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint32 snapshot_number = 3; * @return The snapshotNumber. */ @java.lang.Override public int getSnapshotNumber() { return snapshotNumber_; } /** * required uint32 snapshot_number = 3; * @param value The snapshotNumber to set. * @return This builder for chaining. */ public Builder setSnapshotNumber(int value) { snapshotNumber_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required uint32 snapshot_number = 3; * @return This builder for chaining. */ public Builder clearSnapshotNumber() { bitField0_ = (bitField0_ & ~0x00000004); snapshotNumber_ = 0; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes parent_fullpath = 4; * @return Whether the parentFullpath field is set. */ @java.lang.Override public boolean hasParentFullpath() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes parent_fullpath = 4; * @return The parentFullpath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath() { return parentFullpath_; } /** * required bytes parent_fullpath = 4; * @param value The parentFullpath to set. * @return This builder for chaining. */ public Builder setParentFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } parentFullpath_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * required bytes parent_fullpath = 4; * @return This builder for chaining. 
*/ public Builder clearParentFullpath() { bitField0_ = (bitField0_ & ~0x00000008); parentFullpath_ = getDefaultInstance().getParentFullpath(); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshottableDirectoryStatusProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshottableDirectoryStatusProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshottableDirectoryStatusProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotStatusProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotStatusProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; * @return Whether the dirStatus field is set. */ boolean hasDirStatus(); /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; * @return The dirStatus. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus(); /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder(); /** *
      * Fields specific to a snapshot directory
     * 
* * required uint32 snapshotID = 2; * @return Whether the snapshotID field is set. */ boolean hasSnapshotID(); /** *
      * Fields specific to a snapshot directory
     * 
* * required uint32 snapshotID = 2; * @return The snapshotID. */ int getSnapshotID(); /** * required bytes parent_fullpath = 3; * @return Whether the parentFullpath field is set. */ boolean hasParentFullpath(); /** * required bytes parent_fullpath = 3; * @return The parentFullpath. */ org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath(); /** * required bool isDeleted = 4; * @return Whether the isDeleted field is set. */ boolean hasIsDeleted(); /** * required bool isDeleted = 4; * @return The isDeleted. */ boolean getIsDeleted(); } /** *
   **
   * Status of a snapshot directory: besides the normal information of
   * a directory status, it also includes the snapshot ID and
   * the full path of the parent directory.
   * 
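   *
   * Illustrative read path, as a sketch only: the input stream in is assumed,
   * and parseFrom may throw java.io.IOException.
   *
   *   SnapshotStatusProto snap = SnapshotStatusProto.parseFrom(in);
   *   int id = snap.getSnapshotID();
   *   String parent = snap.getParentFullpath().toStringUtf8();
   *   boolean deleted = snap.getIsDeleted();   // true for a snapshot marked as deleted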
* * Protobuf type {@code hadoop.hdfs.SnapshotStatusProto} */ public static final class SnapshotStatusProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotStatusProto) SnapshotStatusProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotStatusProto.newBuilder() to construct. private SnapshotStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotStatusProto() { parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new SnapshotStatusProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder.class); } private int bitField0_; public static final int DIRSTATUS_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_; /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; * @return Whether the dirStatus field is set. */ @java.lang.Override public boolean hasDirStatus() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; * @return The dirStatus. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() { return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() { return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_; } public static final int SNAPSHOTID_FIELD_NUMBER = 2; private int snapshotID_ = 0; /** *
      * Fields specific to a snapshot directory
     * 
* * required uint32 snapshotID = 2; * @return Whether the snapshotID field is set. */ @java.lang.Override public boolean hasSnapshotID() { return ((bitField0_ & 0x00000002) != 0); } /** *
      * Fields specific to a snapshot directory
     * 
* * required uint32 snapshotID = 2; * @return The snapshotID. */ @java.lang.Override public int getSnapshotID() { return snapshotID_; } public static final int PARENT_FULLPATH_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes parent_fullpath = 3; * @return Whether the parentFullpath field is set. */ @java.lang.Override public boolean hasParentFullpath() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes parent_fullpath = 3; * @return The parentFullpath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath() { return parentFullpath_; } public static final int ISDELETED_FIELD_NUMBER = 4; private boolean isDeleted_ = false; /** * required bool isDeleted = 4; * @return Whether the isDeleted field is set. */ @java.lang.Override public boolean hasIsDeleted() { return ((bitField0_ & 0x00000008) != 0); } /** * required bool isDeleted = 4; * @return The isDeleted. */ @java.lang.Override public boolean getIsDeleted() { return isDeleted_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasDirStatus()) { memoizedIsInitialized = 0; return false; } if (!hasSnapshotID()) { memoizedIsInitialized = 0; return false; } if (!hasParentFullpath()) { memoizedIsInitialized = 0; return false; } if (!hasIsDeleted()) { memoizedIsInitialized = 0; return false; } if (!getDirStatus().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getDirStatus()); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, snapshotID_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBytes(3, parentFullpath_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBool(4, isDeleted_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getDirStatus()); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, snapshotID_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, parentFullpath_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(4, isDeleted_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto) obj; if (hasDirStatus() != other.hasDirStatus()) return false; if (hasDirStatus()) { if (!getDirStatus() .equals(other.getDirStatus())) return false; } if (hasSnapshotID() != 
other.hasSnapshotID()) return false; if (hasSnapshotID()) { if (getSnapshotID() != other.getSnapshotID()) return false; } if (hasParentFullpath() != other.hasParentFullpath()) return false; if (hasParentFullpath()) { if (!getParentFullpath() .equals(other.getParentFullpath())) return false; } if (hasIsDeleted() != other.hasIsDeleted()) return false; if (hasIsDeleted()) { if (getIsDeleted() != other.getIsDeleted()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasDirStatus()) { hash = (37 * hash) + DIRSTATUS_FIELD_NUMBER; hash = (53 * hash) + getDirStatus().hashCode(); } if (hasSnapshotID()) { hash = (37 * hash) + SNAPSHOTID_FIELD_NUMBER; hash = (53 * hash) + getSnapshotID(); } if (hasParentFullpath()) { hash = (37 * hash) + PARENT_FULLPATH_FIELD_NUMBER; hash = (53 * hash) + getParentFullpath().hashCode(); } if (hasIsDeleted()) { hash = (37 * hash) + ISDELETED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getIsDeleted()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
      * Status of a snapshot directory: besides the normal information of
      * a directory status, it also includes the snapshot ID and
      * the full path of the parent directory.
     * 
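      *
      * Minimal builder sketch; someDirStatus and the literal values are
      * assumed placeholders.
      *
      *   SnapshotStatusProto snap = SnapshotStatusProto.newBuilder()
      *       .setDirStatus(someDirStatus)
      *       .setSnapshotID(42)
      *       .setParentFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("/data"))
      *       .setIsDeleted(false)
      *       .build();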
* * Protobuf type {@code hadoop.hdfs.SnapshotStatusProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotStatusProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getDirStatusFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; dirStatus_ = null; if (dirStatusBuilder_ != null) { dirStatusBuilder_.dispose(); dirStatusBuilder_ = null; } snapshotID_ = 0; parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; isDeleted_ = false; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotStatusProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.dirStatus_ = dirStatusBuilder_ == null ? 
dirStatus_ : dirStatusBuilder_.build(); to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.snapshotID_ = snapshotID_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.parentFullpath_ = parentFullpath_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.isDeleted_ = isDeleted_; to_bitField0_ |= 0x00000008; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.getDefaultInstance()) return this; if (other.hasDirStatus()) { mergeDirStatus(other.getDirStatus()); } if (other.hasSnapshotID()) { setSnapshotID(other.getSnapshotID()); } if (other.hasParentFullpath()) { setParentFullpath(other.getParentFullpath()); } if (other.hasIsDeleted()) { setIsDeleted(other.getIsDeleted()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasDirStatus()) { return false; } if (!hasSnapshotID()) { return false; } if (!hasParentFullpath()) { return false; } if (!hasIsDeleted()) { return false; } if (!getDirStatus().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { input.readMessage( getDirStatusFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000001; break; } // case 10 case 16: { snapshotID_ = input.readUInt32(); bitField0_ |= 0x00000002; break; } // case 16 case 26: { parentFullpath_ = input.readBytes(); bitField0_ |= 0x00000004; break; } // case 26 case 32: { isDeleted_ = input.readBool(); bitField0_ |= 0x00000008; break; } // case 32 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an 
endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> dirStatusBuilder_; /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; * @return Whether the dirStatus field is set. */ public boolean hasDirStatus() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; * @return The dirStatus. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() { if (dirStatusBuilder_ == null) { return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_; } else { return dirStatusBuilder_.getMessage(); } } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder setDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (dirStatusBuilder_ == null) { if (value == null) { throw new NullPointerException(); } dirStatus_ = value; } else { dirStatusBuilder_.setMessage(value); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder setDirStatus( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (dirStatusBuilder_ == null) { dirStatus_ = builderForValue.build(); } else { dirStatusBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder mergeDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (dirStatusBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && dirStatus_ != null && dirStatus_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) { getDirStatusBuilder().mergeFrom(value); } else { dirStatus_ = value; } } else { dirStatusBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; onChanged(); return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder clearDirStatus() { bitField0_ = (bitField0_ & ~0x00000001); dirStatus_ = null; if (dirStatusBuilder_ != null) { dirStatusBuilder_.dispose(); dirStatusBuilder_ = null; } onChanged(); return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getDirStatusBuilder() { bitField0_ |= 0x00000001; onChanged(); return getDirStatusFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() { if (dirStatusBuilder_ != null) { return dirStatusBuilder_.getMessageOrBuilder(); } else { return dirStatus_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_; } } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getDirStatusFieldBuilder() { if (dirStatusBuilder_ == null) { dirStatusBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( getDirStatus(), getParentForChildren(), isClean()); dirStatus_ = null; } return dirStatusBuilder_; } private int snapshotID_ ; /** *
       * Fields specific for snapshot directory
       * 
* * required uint32 snapshotID = 2; * @return Whether the snapshotID field is set. */ @java.lang.Override public boolean hasSnapshotID() { return ((bitField0_ & 0x00000002) != 0); } /** *
       * Fields specific for snapshot directory
       * 
* * required uint32 snapshotID = 2; * @return The snapshotID. */ @java.lang.Override public int getSnapshotID() { return snapshotID_; } /** *
       * Fields specific for snapshot directory
       * 
* * required uint32 snapshotID = 2; * @param value The snapshotID to set. * @return This builder for chaining. */ public Builder setSnapshotID(int value) { snapshotID_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** *
       * Fields specific for snapshot directory
       * 
* * required uint32 snapshotID = 2; * @return This builder for chaining. */ public Builder clearSnapshotID() { bitField0_ = (bitField0_ & ~0x00000002); snapshotID_ = 0; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes parent_fullpath = 3; * @return Whether the parentFullpath field is set. */ @java.lang.Override public boolean hasParentFullpath() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes parent_fullpath = 3; * @return The parentFullpath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath() { return parentFullpath_; } /** * required bytes parent_fullpath = 3; * @param value The parentFullpath to set. * @return This builder for chaining. */ public Builder setParentFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } parentFullpath_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required bytes parent_fullpath = 3; * @return This builder for chaining. */ public Builder clearParentFullpath() { bitField0_ = (bitField0_ & ~0x00000004); parentFullpath_ = getDefaultInstance().getParentFullpath(); onChanged(); return this; } private boolean isDeleted_ ; /** * required bool isDeleted = 4; * @return Whether the isDeleted field is set. */ @java.lang.Override public boolean hasIsDeleted() { return ((bitField0_ & 0x00000008) != 0); } /** * required bool isDeleted = 4; * @return The isDeleted. */ @java.lang.Override public boolean getIsDeleted() { return isDeleted_; } /** * required bool isDeleted = 4; * @param value The isDeleted to set. * @return This builder for chaining. */ public Builder setIsDeleted(boolean value) { isDeleted_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * required bool isDeleted = 4; * @return This builder for chaining. 
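Because every field of this message is required, the builder enforces presence at build time. A short sketch (illustrative only) of the difference between build(), buildPartial() and isInitialized():

    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder b =
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.newBuilder()
            .setSnapshotID(3);                       // dirStatus, parent_fullpath and isDeleted left unset
    boolean ready = b.isInitialized();               // false: three required fields are still missing
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto partial =
        b.buildPartial();                            // permitted; partial.isInitialized() is also false
    // b.build() at this point would throw an UninitializedMessageException instead.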
*/ public Builder clearIsDeleted() { bitField0_ = (bitField0_ & ~0x00000008); isDeleted_ = false; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotStatusProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotStatusProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotStatusProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshottableDirectoryListingProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshottableDirectoryListingProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ java.util.List getSnapshottableDirListingList(); /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index); /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ int getSnapshottableDirListingCount(); /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ java.util.List getSnapshottableDirListingOrBuilderList(); /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder( int index); } /** *
   **
   * Snapshottable directory listing
   * 
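SnapshottableDirectoryListingProto is a thin wrapper around one repeated field. A small sketch (illustrative, with a hypothetical payload byte array) of decoding a serialized listing and asking for its size:

    // "payload" is a hypothetical byte[] carrying a serialized listing (e.g. an RPC response body).
    static int countSnapshottableDirs(byte[] payload)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto listing =
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.parseFrom(payload);
      // Each entry is a SnapshottableDirectoryStatusProto, defined elsewhere in this file.
      return listing.getSnapshottableDirListingCount();
    }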
* * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryListingProto} */ public static final class SnapshottableDirectoryListingProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshottableDirectoryListingProto) SnapshottableDirectoryListingProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshottableDirectoryListingProto.newBuilder() to construct. private SnapshottableDirectoryListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshottableDirectoryListingProto() { snapshottableDirListing_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new SnapshottableDirectoryListingProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder.class); } public static final int SNAPSHOTTABLEDIRLISTING_FIELD_NUMBER = 1; @SuppressWarnings("serial") private java.util.List snapshottableDirListing_; /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ @java.lang.Override public java.util.List getSnapshottableDirListingList() { return snapshottableDirListing_; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ @java.lang.Override public java.util.List getSnapshottableDirListingOrBuilderList() { return snapshottableDirListing_; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ @java.lang.Override public int getSnapshottableDirListingCount() { return snapshottableDirListing_.size(); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index) { return snapshottableDirListing_.get(index); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder( int index) { return snapshottableDirListing_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; for (int i = 0; i < getSnapshottableDirListingCount(); i++) { if (!getSnapshottableDirListing(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; 
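The writeTo and getSerializedSize methods that follow pair with the parseFrom overloads further down for a byte-for-byte round trip. An illustrative sketch, with listing assumed to be an already built instance:

    static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto roundTrip(
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto listing)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      byte[] wire = listing.toByteArray();           // getSerializedSize() + writeTo(...) under the hood
      // The decoded copy equals(listing); equals()/hashCode() compare the repeated field element-wise.
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.parseFrom(wire);
    }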
} @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < snapshottableDirListing_.size(); i++) { output.writeMessage(1, snapshottableDirListing_.get(i)); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < snapshottableDirListing_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, snapshottableDirListing_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto) obj; if (!getSnapshottableDirListingList() .equals(other.getSnapshottableDirListingList())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getSnapshottableDirListingCount() > 0) { hash = (37 * hash) + SNAPSHOTTABLEDIRLISTING_FIELD_NUMBER; hash = (53 * hash) + getSnapshottableDirListingList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Snapshottable directory listing
     * 
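The builder below exposes the usual repeated-field operations: indexed set, add, addAll, remove and clear. An illustrative sketch that assembles a listing from an existing list of SnapshottableDirectoryStatusProto (the list itself is an assumption, and each element must be fully initialized for build() to succeed):

    static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto toListing(
        java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto> dirs) {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.newBuilder()
          .addAllSnapshottableDirListing(dirs)       // copies every element into the repeated field
          .build();                                  // also verifies each element is fully initialized
    }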
* * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryListingProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshottableDirectoryListingProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; if (snapshottableDirListingBuilder_ == null) { snapshottableDirListing_ = java.util.Collections.emptyList(); } else { snapshottableDirListing_ = null; snapshottableDirListingBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result) { if (snapshottableDirListingBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { snapshottableDirListing_ = java.util.Collections.unmodifiableList(snapshottableDirListing_); bitField0_ = (bitField0_ & ~0x00000001); } result.snapshottableDirListing_ = snapshottableDirListing_; } else { result.snapshottableDirListing_ = snapshottableDirListingBuilder_.build(); } } private void 
buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result) { int from_bitField0_ = bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance()) return this; if (snapshottableDirListingBuilder_ == null) { if (!other.snapshottableDirListing_.isEmpty()) { if (snapshottableDirListing_.isEmpty()) { snapshottableDirListing_ = other.snapshottableDirListing_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.addAll(other.snapshottableDirListing_); } onChanged(); } } else { if (!other.snapshottableDirListing_.isEmpty()) { if (snapshottableDirListingBuilder_.isEmpty()) { snapshottableDirListingBuilder_.dispose(); snapshottableDirListingBuilder_ = null; snapshottableDirListing_ = other.snapshottableDirListing_; bitField0_ = (bitField0_ & ~0x00000001); snapshottableDirListingBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getSnapshottableDirListingFieldBuilder() : null; } else { snapshottableDirListingBuilder_.addAllMessages(other.snapshottableDirListing_); } } } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { for (int i = 0; i < getSnapshottableDirListingCount(); i++) { if (!getSnapshottableDirListing(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.PARSER, extensionRegistry); if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.add(m); } else { snapshottableDirListingBuilder_.addMessage(m); } break; } // case 10 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.util.List snapshottableDirListing_ = java.util.Collections.emptyList(); private void ensureSnapshottableDirListingIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { snapshottableDirListing_ = new java.util.ArrayList(snapshottableDirListing_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder> snapshottableDirListingBuilder_; /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public java.util.List getSnapshottableDirListingList() { if (snapshottableDirListingBuilder_ == null) { return java.util.Collections.unmodifiableList(snapshottableDirListing_); } else { return snapshottableDirListingBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public int getSnapshottableDirListingCount() { if (snapshottableDirListingBuilder_ == null) { return snapshottableDirListing_.size(); } else { return snapshottableDirListingBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index) { if (snapshottableDirListingBuilder_ == null) { return snapshottableDirListing_.get(index); } else { return snapshottableDirListingBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder setSnapshottableDirListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) { if 
(snapshottableDirListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.set(index, value); onChanged(); } else { snapshottableDirListingBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder setSnapshottableDirListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.set(index, builderForValue.build()); onChanged(); } else { snapshottableDirListingBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addSnapshottableDirListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) { if (snapshottableDirListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.add(value); onChanged(); } else { snapshottableDirListingBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addSnapshottableDirListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) { if (snapshottableDirListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.add(index, value); onChanged(); } else { snapshottableDirListingBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addSnapshottableDirListing( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.add(builderForValue.build()); onChanged(); } else { snapshottableDirListingBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addSnapshottableDirListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.add(index, builderForValue.build()); onChanged(); } else { snapshottableDirListingBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addAllSnapshottableDirListing( java.lang.Iterable values) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, snapshottableDirListing_); onChanged(); } else { snapshottableDirListingBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder clearSnapshottableDirListing() { if (snapshottableDirListingBuilder_ == null) { snapshottableDirListing_ = 
java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { snapshottableDirListingBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder removeSnapshottableDirListing(int index) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.remove(index); onChanged(); } else { snapshottableDirListingBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder getSnapshottableDirListingBuilder( int index) { return getSnapshottableDirListingFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder( int index) { if (snapshottableDirListingBuilder_ == null) { return snapshottableDirListing_.get(index); } else { return snapshottableDirListingBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public java.util.List getSnapshottableDirListingOrBuilderList() { if (snapshottableDirListingBuilder_ != null) { return snapshottableDirListingBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(snapshottableDirListing_); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder addSnapshottableDirListingBuilder() { return getSnapshottableDirListingFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder addSnapshottableDirListingBuilder( int index) { return getSnapshottableDirListingFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public java.util.List getSnapshottableDirListingBuilderList() { return getSnapshottableDirListingFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder> getSnapshottableDirListingFieldBuilder() { if (snapshottableDirListingBuilder_ == null) { snapshottableDirListingBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder>( snapshottableDirListing_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); 
snapshottableDirListing_ = null; } return snapshottableDirListingBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshottableDirectoryListingProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshottableDirectoryListingProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshottableDirectoryListingProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotListingProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotListingProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ java.util.List getSnapshotListingList(); /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto getSnapshotListing(int index); /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ int getSnapshotListingCount(); /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ java.util.List getSnapshotListingOrBuilderList(); /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder getSnapshotListingOrBuilder( int index); } /** *
   **
   * Snapshot listing
   * 
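SnapshotListingProto mirrors the previous message but holds SnapshotStatusProto entries. An illustrative sketch of walking a listing obtained elsewhere and printing each snapshot's fields:

    static void printSnapshots(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto listing) {
      for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto s
          : listing.getSnapshotListingList()) {
        System.out.println(s.getParentFullpath().toStringUtf8()
            + " snapshotId=" + s.getSnapshotID()
            + " deleted=" + s.getIsDeleted());
      }
    }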
* * Protobuf type {@code hadoop.hdfs.SnapshotListingProto} */ public static final class SnapshotListingProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotListingProto) SnapshotListingProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotListingProto.newBuilder() to construct. private SnapshotListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotListingProto() { snapshotListing_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new SnapshotListingProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.Builder.class); } public static final int SNAPSHOTLISTING_FIELD_NUMBER = 1; @SuppressWarnings("serial") private java.util.List snapshotListing_; /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ @java.lang.Override public java.util.List getSnapshotListingList() { return snapshotListing_; } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ @java.lang.Override public java.util.List getSnapshotListingOrBuilderList() { return snapshotListing_; } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ @java.lang.Override public int getSnapshotListingCount() { return snapshotListing_.size(); } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto getSnapshotListing(int index) { return snapshotListing_.get(index); } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder getSnapshotListingOrBuilder( int index) { return snapshotListing_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; for (int i = 0; i < getSnapshotListingCount(); i++) { if (!getSnapshotListing(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < snapshotListing_.size(); i++) { output.writeMessage(1, snapshotListing_.get(i)); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < 
snapshotListing_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, snapshotListing_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto) obj; if (!getSnapshotListingList() .equals(other.getSnapshotListingList())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getSnapshotListingCount() > 0) { hash = (37 * hash) + SNAPSHOTLISTING_FIELD_NUMBER; hash = (53 * hash) + getSnapshotListingList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Snapshot listing
     * 
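As with all generated builders, merging appends to repeated fields rather than replacing them, so two listings can be concatenated either via mergeFrom or by copying element lists. An illustrative sketch, with a and b assumed to be existing SnapshotListingProto instances:

    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto merged =
        a.toBuilder().mergeFrom(b).build();          // appends b's entries after a's
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto concatenated =
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.newBuilder()
            .addAllSnapshotListing(a.getSnapshotListingList())
            .addAllSnapshotListing(b.getSnapshotListingList())
            .build();                                // same contents, assembled explicitly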
* * Protobuf type {@code hadoop.hdfs.SnapshotListingProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotListingProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; if (snapshotListingBuilder_ == null) { snapshotListing_ = java.util.Collections.emptyList(); } else { snapshotListing_ = null; snapshotListingBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotListingProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto result) { if (snapshotListingBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { snapshotListing_ = java.util.Collections.unmodifiableList(snapshotListing_); bitField0_ = (bitField0_ & ~0x00000001); } result.snapshotListing_ = snapshotListing_; } else { result.snapshotListing_ = snapshotListingBuilder_.build(); } } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto result) { int from_bitField0_ = bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override 
public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.getDefaultInstance()) return this; if (snapshotListingBuilder_ == null) { if (!other.snapshotListing_.isEmpty()) { if (snapshotListing_.isEmpty()) { snapshotListing_ = other.snapshotListing_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureSnapshotListingIsMutable(); snapshotListing_.addAll(other.snapshotListing_); } onChanged(); } } else { if (!other.snapshotListing_.isEmpty()) { if (snapshotListingBuilder_.isEmpty()) { snapshotListingBuilder_.dispose(); snapshotListingBuilder_ = null; snapshotListing_ = other.snapshotListing_; bitField0_ = (bitField0_ & ~0x00000001); snapshotListingBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getSnapshotListingFieldBuilder() : null; } else { snapshotListingBuilder_.addAllMessages(other.snapshotListing_); } } } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { for (int i = 0; i < getSnapshotListingCount(); i++) { if (!getSnapshotListing(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.PARSER, extensionRegistry); if (snapshotListingBuilder_ == null) { ensureSnapshotListingIsMutable(); snapshotListing_.add(m); } else { snapshotListingBuilder_.addMessage(m); } break; } // case 10 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.util.List snapshotListing_ = java.util.Collections.emptyList(); private void ensureSnapshotListingIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { snapshotListing_ = new java.util.ArrayList(snapshotListing_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder> snapshotListingBuilder_; /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public java.util.List getSnapshotListingList() { if (snapshotListingBuilder_ == null) { return java.util.Collections.unmodifiableList(snapshotListing_); } else { return snapshotListingBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public int getSnapshotListingCount() { if (snapshotListingBuilder_ == null) { return snapshotListing_.size(); } else { return snapshotListingBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto getSnapshotListing(int index) { if (snapshotListingBuilder_ == null) { return snapshotListing_.get(index); } else { return snapshotListingBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public Builder setSnapshotListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto value) { if (snapshotListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSnapshotListingIsMutable(); snapshotListing_.set(index, value); onChanged(); } else { snapshotListingBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public Builder setSnapshotListing( int index, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder builderForValue) { if (snapshotListingBuilder_ == null) { ensureSnapshotListingIsMutable(); snapshotListing_.set(index, builderForValue.build()); onChanged(); } else { snapshotListingBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public Builder addSnapshotListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto value) { if (snapshotListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSnapshotListingIsMutable(); snapshotListing_.add(value); onChanged(); } else { snapshotListingBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public Builder addSnapshotListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto value) { if (snapshotListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSnapshotListingIsMutable(); snapshotListing_.add(index, value); onChanged(); } else { snapshotListingBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public Builder addSnapshotListing( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder builderForValue) { if (snapshotListingBuilder_ == null) { ensureSnapshotListingIsMutable(); snapshotListing_.add(builderForValue.build()); onChanged(); } else { snapshotListingBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public Builder addSnapshotListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder builderForValue) { if (snapshotListingBuilder_ == null) { ensureSnapshotListingIsMutable(); snapshotListing_.add(index, builderForValue.build()); onChanged(); } else { snapshotListingBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public Builder addAllSnapshotListing( java.lang.Iterable values) { if (snapshotListingBuilder_ == null) { ensureSnapshotListingIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, snapshotListing_); onChanged(); } else { snapshotListingBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public Builder clearSnapshotListing() { if (snapshotListingBuilder_ == null) { snapshotListing_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { snapshotListingBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public Builder removeSnapshotListing(int index) { if (snapshotListingBuilder_ == null) { ensureSnapshotListingIsMutable(); snapshotListing_.remove(index); onChanged(); } else { snapshotListingBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder getSnapshotListingBuilder( int index) { return getSnapshotListingFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder getSnapshotListingOrBuilder( int index) 
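      // Usage sketch (illustrative, not part of the generated output): the repeated
      // "snapshotListing" field can also be edited in place on a builder, using the
      // accessors in this section. Assumes "first" and "second" are
      // SnapshotStatusProto instances.
      //
      //   HdfsProtos.SnapshotListingProto.Builder lb =
      //       HdfsProtos.SnapshotListingProto.newBuilder();
      //   lb.addSnapshotListing(first);        // append
      //   lb.addSnapshotListing(0, second);    // insert at index 0
      //   lb.setSnapshotListing(1, second);    // replace index 1
      //   lb.removeSnapshotListing(0);         // drop index 0
      //   lb.clearSnapshotListing();           // back to an empty list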
{ if (snapshotListingBuilder_ == null) { return snapshotListing_.get(index); } else { return snapshotListingBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public java.util.List getSnapshotListingOrBuilderList() { if (snapshotListingBuilder_ != null) { return snapshotListingBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(snapshotListing_); } } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder addSnapshotListingBuilder() { return getSnapshotListingFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder addSnapshotListingBuilder( int index) { return getSnapshotListingFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1; */ public java.util.List getSnapshotListingBuilderList() { return getSnapshotListingFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder> getSnapshotListingFieldBuilder() { if (snapshotListingBuilder_ == null) { snapshotListingBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder>( snapshotListing_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); snapshotListing_ = null; } return snapshotListingBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotListingProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotListingProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotListingProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, 
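      // Usage sketch (illustrative, not part of the generated output): a byte-level
      // round trip through PARSER. toByteArray() comes from the protobuf base
      // classes; parseFrom(byte[]) is the standard generated overload, assumed to be
      // present for this message as it is for the sibling messages below.
      //
      //   byte[] bytes = listing.toByteArray();
      //   HdfsProtos.SnapshotListingProto copy =
      //       HdfsProtos.SnapshotListingProto.parseFrom(bytes);
      //   // parseFrom throws InvalidProtocolBufferException on malformed input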
extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotDiffReportEntryProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportEntryProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required bytes fullpath = 1; * @return Whether the fullpath field is set. */ boolean hasFullpath(); /** * required bytes fullpath = 1; * @return The fullpath. */ org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath(); /** * required string modificationLabel = 2; * @return Whether the modificationLabel field is set. */ boolean hasModificationLabel(); /** * required string modificationLabel = 2; * @return The modificationLabel. */ java.lang.String getModificationLabel(); /** * required string modificationLabel = 2; * @return The bytes for modificationLabel. */ org.apache.hadoop.thirdparty.protobuf.ByteString getModificationLabelBytes(); /** * optional bytes targetPath = 3; * @return Whether the targetPath field is set. */ boolean hasTargetPath(); /** * optional bytes targetPath = 3; * @return The targetPath. */ org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath(); } /** *
   **
   * Snapshot diff report entry
   * 
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportEntryProto} */ public static final class SnapshotDiffReportEntryProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportEntryProto) SnapshotDiffReportEntryProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotDiffReportEntryProto.newBuilder() to construct. private SnapshotDiffReportEntryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotDiffReportEntryProto() { fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; modificationLabel_ = ""; targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new SnapshotDiffReportEntryProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder.class); } private int bitField0_; public static final int FULLPATH_FIELD_NUMBER = 1; private org.apache.hadoop.thirdparty.protobuf.ByteString fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes fullpath = 1; * @return Whether the fullpath field is set. */ @java.lang.Override public boolean hasFullpath() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes fullpath = 1; * @return The fullpath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath() { return fullpath_; } public static final int MODIFICATIONLABEL_FIELD_NUMBER = 2; @SuppressWarnings("serial") private volatile java.lang.Object modificationLabel_ = ""; /** * required string modificationLabel = 2; * @return Whether the modificationLabel field is set. */ @java.lang.Override public boolean hasModificationLabel() { return ((bitField0_ & 0x00000002) != 0); } /** * required string modificationLabel = 2; * @return The modificationLabel. */ @java.lang.Override public java.lang.String getModificationLabel() { java.lang.Object ref = modificationLabel_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { modificationLabel_ = s; } return s; } } /** * required string modificationLabel = 2; * @return The bytes for modificationLabel. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getModificationLabelBytes() { java.lang.Object ref = modificationLabel_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); modificationLabel_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int TARGETPATH_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes targetPath = 3; * @return Whether the targetPath field is set. */ @java.lang.Override public boolean hasTargetPath() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bytes targetPath = 3; * @return The targetPath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath() { return targetPath_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasFullpath()) { memoizedIsInitialized = 0; return false; } if (!hasModificationLabel()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeBytes(1, fullpath_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, modificationLabel_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBytes(3, targetPath_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(1, fullpath_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, modificationLabel_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, targetPath_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto) obj; if (hasFullpath() != other.hasFullpath()) return false; if (hasFullpath()) { if (!getFullpath() .equals(other.getFullpath())) return false; } if (hasModificationLabel() != other.hasModificationLabel()) return false; if (hasModificationLabel()) { if (!getModificationLabel() .equals(other.getModificationLabel())) return false; } if (hasTargetPath() != other.hasTargetPath()) return false; if (hasTargetPath()) { if (!getTargetPath() .equals(other.getTargetPath())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return 
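    // Usage sketch (illustrative, not part of the generated output): equality is
    // value-based, so a message rebuilt from a prototype compares equal to the
    // original, and hashCode() here memoizes its result after the first call.
    // Assumes "entry" is an existing SnapshotDiffReportEntryProto.
    //
    //   boolean same = entry.equals(
    //       HdfsProtos.SnapshotDiffReportEntryProto.newBuilder(entry).build());
    //   // same == true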
memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasFullpath()) { hash = (37 * hash) + FULLPATH_FIELD_NUMBER; hash = (53 * hash) + getFullpath().hashCode(); } if (hasModificationLabel()) { hash = (37 * hash) + MODIFICATIONLABEL_FIELD_NUMBER; hash = (53 * hash) + getModificationLabel().hashCode(); } if (hasTargetPath()) { hash = (37 * hash) + TARGETPATH_FIELD_NUMBER; hash = (53 * hash) + getTargetPath().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
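    // Usage sketch (illustrative, not part of the generated output): the delimited
    // overloads here pair with writeDelimitedTo(...) from the protobuf base classes,
    // so several entries can share one stream. "out" and "in" are a hypothetical
    // OutputStream/InputStream pair; parseDelimitedFrom returns null at end of stream.
    //
    //   entry.writeDelimitedTo(out);
    //   HdfsProtos.SnapshotDiffReportEntryProto next =
    //       HdfsProtos.SnapshotDiffReportEntryProto.parseDelimitedFrom(in);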
.parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Snapshot diff report entry
     * 
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportEntryProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportEntryProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; modificationLabel_ = ""; targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.fullpath_ = fullpath_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.modificationLabel_ = modificationLabel_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.targetPath_ = targetPath_; to_bitField0_ |= 0x00000004; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); 
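      // Usage sketch (illustrative, not part of the generated output): building one
      // diff entry with the setters below. fullpath and modificationLabel are
      // required; targetPath is optional. The path and label values are made up.
      //
      //   HdfsProtos.SnapshotDiffReportEntryProto entry =
      //       HdfsProtos.SnapshotDiffReportEntryProto.newBuilder()
      //           .setFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString
      //               .copyFromUtf8("dir/file"))
      //           .setModificationLabel("M")
      //           .build();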
} @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance()) return this; if (other.hasFullpath()) { setFullpath(other.getFullpath()); } if (other.hasModificationLabel()) { modificationLabel_ = other.modificationLabel_; bitField0_ |= 0x00000002; onChanged(); } if (other.hasTargetPath()) { setTargetPath(other.getTargetPath()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasFullpath()) { return false; } if (!hasModificationLabel()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { fullpath_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 18: { modificationLabel_ = input.readBytes(); bitField0_ |= 0x00000002; break; } // case 18 case 26: { targetPath_ = input.readBytes(); bitField0_ |= 0x00000004; break; } // case 26 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.ByteString fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes fullpath = 1; * @return Whether the fullpath field is set. */ @java.lang.Override public boolean hasFullpath() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes fullpath = 1; * @return The fullpath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath() { return fullpath_; } /** * required bytes fullpath = 1; * @param value The fullpath to set. * @return This builder for chaining. 
*/ public Builder setFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } fullpath_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required bytes fullpath = 1; * @return This builder for chaining. */ public Builder clearFullpath() { bitField0_ = (bitField0_ & ~0x00000001); fullpath_ = getDefaultInstance().getFullpath(); onChanged(); return this; } private java.lang.Object modificationLabel_ = ""; /** * required string modificationLabel = 2; * @return Whether the modificationLabel field is set. */ public boolean hasModificationLabel() { return ((bitField0_ & 0x00000002) != 0); } /** * required string modificationLabel = 2; * @return The modificationLabel. */ public java.lang.String getModificationLabel() { java.lang.Object ref = modificationLabel_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { modificationLabel_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string modificationLabel = 2; * @return The bytes for modificationLabel. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getModificationLabelBytes() { java.lang.Object ref = modificationLabel_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); modificationLabel_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string modificationLabel = 2; * @param value The modificationLabel to set. * @return This builder for chaining. */ public Builder setModificationLabel( java.lang.String value) { if (value == null) { throw new NullPointerException(); } modificationLabel_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required string modificationLabel = 2; * @return This builder for chaining. */ public Builder clearModificationLabel() { modificationLabel_ = getDefaultInstance().getModificationLabel(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } /** * required string modificationLabel = 2; * @param value The bytes for modificationLabel to set. * @return This builder for chaining. */ public Builder setModificationLabelBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } modificationLabel_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes targetPath = 3; * @return Whether the targetPath field is set. */ @java.lang.Override public boolean hasTargetPath() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bytes targetPath = 3; * @return The targetPath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath() { return targetPath_; } /** * optional bytes targetPath = 3; * @param value The targetPath to set. * @return This builder for chaining. */ public Builder setTargetPath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } targetPath_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * optional bytes targetPath = 3; * @return This builder for chaining. 
*/ public Builder clearTargetPath() { bitField0_ = (bitField0_ & ~0x00000004); targetPath_ = getDefaultInstance().getTargetPath(); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportEntryProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportEntryProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotDiffReportEntryProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotDiffReportProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
     * full path of the directory where snapshots were taken
     * 
* * required string snapshotRoot = 1; * @return Whether the snapshotRoot field is set. */ boolean hasSnapshotRoot(); /** *
     * full path of the directory where snapshots were taken
     * 
* * required string snapshotRoot = 1; * @return The snapshotRoot. */ java.lang.String getSnapshotRoot(); /** *
     * full path of the directory where snapshots were taken
     * 
* * required string snapshotRoot = 1; * @return The bytes for snapshotRoot. */ org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotRootBytes(); /** * required string fromSnapshot = 2; * @return Whether the fromSnapshot field is set. */ boolean hasFromSnapshot(); /** * required string fromSnapshot = 2; * @return The fromSnapshot. */ java.lang.String getFromSnapshot(); /** * required string fromSnapshot = 2; * @return The bytes for fromSnapshot. */ org.apache.hadoop.thirdparty.protobuf.ByteString getFromSnapshotBytes(); /** * required string toSnapshot = 3; * @return Whether the toSnapshot field is set. */ boolean hasToSnapshot(); /** * required string toSnapshot = 3; * @return The toSnapshot. */ java.lang.String getToSnapshot(); /** * required string toSnapshot = 3; * @return The bytes for toSnapshot. */ org.apache.hadoop.thirdparty.protobuf.ByteString getToSnapshotBytes(); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ java.util.List getDiffReportEntriesList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ int getDiffReportEntriesCount(); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ java.util.List getDiffReportEntriesOrBuilderList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder( int index); } /** *
   **
   * Snapshot diff report
   * 
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportProto} */ public static final class SnapshotDiffReportProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportProto) SnapshotDiffReportProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotDiffReportProto.newBuilder() to construct. private SnapshotDiffReportProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotDiffReportProto() { snapshotRoot_ = ""; fromSnapshot_ = ""; toSnapshot_ = ""; diffReportEntries_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new SnapshotDiffReportProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder.class); } private int bitField0_; public static final int SNAPSHOTROOT_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object snapshotRoot_ = ""; /** *
     * full path of the directory where snapshots were taken
     * 
* * required string snapshotRoot = 1; * @return Whether the snapshotRoot field is set. */ @java.lang.Override public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) != 0); } /** *
     * full path of the directory where snapshots were taken
     * 
* * required string snapshotRoot = 1; * @return The snapshotRoot. */ @java.lang.Override public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } } /** *
     * full path of the directory where snapshots were taken
     * 
* * required string snapshotRoot = 1; * @return The bytes for snapshotRoot. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int FROMSNAPSHOT_FIELD_NUMBER = 2; @SuppressWarnings("serial") private volatile java.lang.Object fromSnapshot_ = ""; /** * required string fromSnapshot = 2; * @return Whether the fromSnapshot field is set. */ @java.lang.Override public boolean hasFromSnapshot() { return ((bitField0_ & 0x00000002) != 0); } /** * required string fromSnapshot = 2; * @return The fromSnapshot. */ @java.lang.Override public java.lang.String getFromSnapshot() { java.lang.Object ref = fromSnapshot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { fromSnapshot_ = s; } return s; } } /** * required string fromSnapshot = 2; * @return The bytes for fromSnapshot. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getFromSnapshotBytes() { java.lang.Object ref = fromSnapshot_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromSnapshot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int TOSNAPSHOT_FIELD_NUMBER = 3; @SuppressWarnings("serial") private volatile java.lang.Object toSnapshot_ = ""; /** * required string toSnapshot = 3; * @return Whether the toSnapshot field is set. */ @java.lang.Override public boolean hasToSnapshot() { return ((bitField0_ & 0x00000004) != 0); } /** * required string toSnapshot = 3; * @return The toSnapshot. */ @java.lang.Override public java.lang.String getToSnapshot() { java.lang.Object ref = toSnapshot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { toSnapshot_ = s; } return s; } } /** * required string toSnapshot = 3; * @return The bytes for toSnapshot. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getToSnapshotBytes() { java.lang.Object ref = toSnapshot_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); toSnapshot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int DIFFREPORTENTRIES_FIELD_NUMBER = 4; @SuppressWarnings("serial") private java.util.List diffReportEntries_; /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ @java.lang.Override public java.util.List getDiffReportEntriesList() { return diffReportEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ @java.lang.Override public java.util.List getDiffReportEntriesOrBuilderList() { return diffReportEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ @java.lang.Override public int getDiffReportEntriesCount() { return diffReportEntries_.size(); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index) { return diffReportEntries_.get(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder( int index) { return diffReportEntries_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSnapshotRoot()) { memoizedIsInitialized = 0; return false; } if (!hasFromSnapshot()) { memoizedIsInitialized = 0; return false; } if (!hasToSnapshot()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getDiffReportEntriesCount(); i++) { if (!getDiffReportEntries(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, snapshotRoot_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, fromSnapshot_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, toSnapshot_); } for (int i = 0; i < diffReportEntries_.size(); i++) { output.writeMessage(4, diffReportEntries_.get(i)); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, snapshotRoot_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, fromSnapshot_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, toSnapshot_); } for (int i = 0; i < diffReportEntries_.size(); i++) 
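    // Usage sketch (illustrative, not part of the generated output): assembling a
    // report from entries, using the standard generated builder setters for these
    // fields (assumed present alongside this message's Builder). All three string
    // fields are required, so build() fails if any is left unset.
    //
    //   HdfsProtos.SnapshotDiffReportProto report =
    //       HdfsProtos.SnapshotDiffReportProto.newBuilder()
    //           .setSnapshotRoot("/data")
    //           .setFromSnapshot("s1")
    //           .setToSnapshot("s2")
    //           .addDiffReportEntries(entry)
    //           .build();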
{ size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, diffReportEntries_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto) obj; if (hasSnapshotRoot() != other.hasSnapshotRoot()) return false; if (hasSnapshotRoot()) { if (!getSnapshotRoot() .equals(other.getSnapshotRoot())) return false; } if (hasFromSnapshot() != other.hasFromSnapshot()) return false; if (hasFromSnapshot()) { if (!getFromSnapshot() .equals(other.getFromSnapshot())) return false; } if (hasToSnapshot() != other.hasToSnapshot()) return false; if (hasToSnapshot()) { if (!getToSnapshot() .equals(other.getToSnapshot())) return false; } if (!getDiffReportEntriesList() .equals(other.getDiffReportEntriesList())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSnapshotRoot()) { hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshotRoot().hashCode(); } if (hasFromSnapshot()) { hash = (37 * hash) + FROMSNAPSHOT_FIELD_NUMBER; hash = (53 * hash) + getFromSnapshot().hashCode(); } if (hasToSnapshot()) { hash = (37 * hash) + TOSNAPSHOT_FIELD_NUMBER; hash = (53 * hash) + getToSnapshot().hashCode(); } if (getDiffReportEntriesCount() > 0) { hash = (37 * hash) + DIFFREPORTENTRIES_FIELD_NUMBER; hash = (53 * hash) + getDiffReportEntriesList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( byte[] 
data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Snapshot diff report
     * 
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; snapshotRoot_ = ""; fromSnapshot_ = ""; toSnapshot_ = ""; if (diffReportEntriesBuilder_ == null) { diffReportEntries_ = java.util.Collections.emptyList(); } else { diffReportEntries_ = null; diffReportEntriesBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result) { if (diffReportEntriesBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0)) { diffReportEntries_ = java.util.Collections.unmodifiableList(diffReportEntries_); bitField0_ = (bitField0_ & ~0x00000008); } result.diffReportEntries_ = diffReportEntries_; } else { result.diffReportEntries_ = diffReportEntriesBuilder_.build(); } } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.snapshotRoot_ = snapshotRoot_; to_bitField0_ |= 0x00000001; } if 
(((from_bitField0_ & 0x00000002) != 0)) { result.fromSnapshot_ = fromSnapshot_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.toSnapshot_ = toSnapshot_; to_bitField0_ |= 0x00000004; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance()) return this; if (other.hasSnapshotRoot()) { snapshotRoot_ = other.snapshotRoot_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasFromSnapshot()) { fromSnapshot_ = other.fromSnapshot_; bitField0_ |= 0x00000002; onChanged(); } if (other.hasToSnapshot()) { toSnapshot_ = other.toSnapshot_; bitField0_ |= 0x00000004; onChanged(); } if (diffReportEntriesBuilder_ == null) { if (!other.diffReportEntries_.isEmpty()) { if (diffReportEntries_.isEmpty()) { diffReportEntries_ = other.diffReportEntries_; bitField0_ = (bitField0_ & ~0x00000008); } else { ensureDiffReportEntriesIsMutable(); diffReportEntries_.addAll(other.diffReportEntries_); } onChanged(); } } else { if (!other.diffReportEntries_.isEmpty()) { if (diffReportEntriesBuilder_.isEmpty()) { diffReportEntriesBuilder_.dispose(); diffReportEntriesBuilder_ = null; diffReportEntries_ = other.diffReportEntries_; bitField0_ = (bitField0_ & ~0x00000008); diffReportEntriesBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getDiffReportEntriesFieldBuilder() : null; } else { diffReportEntriesBuilder_.addAllMessages(other.diffReportEntries_); } } } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasSnapshotRoot()) { return false; } if (!hasFromSnapshot()) { return false; } if (!hasToSnapshot()) { return false; } for (int i = 0; i < getDiffReportEntriesCount(); i++) { if (!getDiffReportEntries(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { snapshotRoot_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 18: { fromSnapshot_ = input.readBytes(); bitField0_ |= 0x00000002; break; } // case 18 case 26: { toSnapshot_ = input.readBytes(); bitField0_ |= 0x00000004; break; } // case 26 case 34: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.PARSER, extensionRegistry); if (diffReportEntriesBuilder_ == null) { ensureDiffReportEntriesIsMutable(); diffReportEntries_.add(m); } else { diffReportEntriesBuilder_.addMessage(m); } break; } // case 34 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object snapshotRoot_ = ""; /** *
       * full path of the directory where snapshots were taken
       * 
       *
       * required string snapshotRoot = 1;
       * @return Whether the snapshotRoot field is set.
       */
      public boolean hasSnapshotRoot() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       *
       * full path of the directory where snapshots were taken
       * 
* * required string snapshotRoot = 1; * @return The snapshotRoot. */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } else { return (java.lang.String) ref; } } /** *
       * full path of the directory where snapshots were taken
       * 
* * required string snapshotRoot = 1; * @return The bytes for snapshotRoot. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** *
       * full path of the directory where snapshots were taken
       * 
* * required string snapshotRoot = 1; * @param value The snapshotRoot to set. * @return This builder for chaining. */ public Builder setSnapshotRoot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } snapshotRoot_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** *
       * full path of the directory where snapshots were taken
       * 
* * required string snapshotRoot = 1; * @return This builder for chaining. */ public Builder clearSnapshotRoot() { snapshotRoot_ = getDefaultInstance().getSnapshotRoot(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** *
       * full path of the directory where snapshots were taken
       * 
* * required string snapshotRoot = 1; * @param value The bytes for snapshotRoot to set. * @return This builder for chaining. */ public Builder setSnapshotRootBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } snapshotRoot_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private java.lang.Object fromSnapshot_ = ""; /** * required string fromSnapshot = 2; * @return Whether the fromSnapshot field is set. */ public boolean hasFromSnapshot() { return ((bitField0_ & 0x00000002) != 0); } /** * required string fromSnapshot = 2; * @return The fromSnapshot. */ public java.lang.String getFromSnapshot() { java.lang.Object ref = fromSnapshot_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { fromSnapshot_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string fromSnapshot = 2; * @return The bytes for fromSnapshot. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getFromSnapshotBytes() { java.lang.Object ref = fromSnapshot_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromSnapshot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string fromSnapshot = 2; * @param value The fromSnapshot to set. * @return This builder for chaining. */ public Builder setFromSnapshot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } fromSnapshot_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required string fromSnapshot = 2; * @return This builder for chaining. */ public Builder clearFromSnapshot() { fromSnapshot_ = getDefaultInstance().getFromSnapshot(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } /** * required string fromSnapshot = 2; * @param value The bytes for fromSnapshot to set. * @return This builder for chaining. */ public Builder setFromSnapshotBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } fromSnapshot_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } private java.lang.Object toSnapshot_ = ""; /** * required string toSnapshot = 3; * @return Whether the toSnapshot field is set. */ public boolean hasToSnapshot() { return ((bitField0_ & 0x00000004) != 0); } /** * required string toSnapshot = 3; * @return The toSnapshot. */ public java.lang.String getToSnapshot() { java.lang.Object ref = toSnapshot_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { toSnapshot_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string toSnapshot = 3; * @return The bytes for toSnapshot. 
*/ public org.apache.hadoop.thirdparty.protobuf.ByteString getToSnapshotBytes() { java.lang.Object ref = toSnapshot_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); toSnapshot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string toSnapshot = 3; * @param value The toSnapshot to set. * @return This builder for chaining. */ public Builder setToSnapshot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } toSnapshot_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required string toSnapshot = 3; * @return This builder for chaining. */ public Builder clearToSnapshot() { toSnapshot_ = getDefaultInstance().getToSnapshot(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } /** * required string toSnapshot = 3; * @param value The bytes for toSnapshot to set. * @return This builder for chaining. */ public Builder setToSnapshotBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } toSnapshot_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } private java.util.List diffReportEntries_ = java.util.Collections.emptyList(); private void ensureDiffReportEntriesIsMutable() { if (!((bitField0_ & 0x00000008) != 0)) { diffReportEntries_ = new java.util.ArrayList(diffReportEntries_); bitField0_ |= 0x00000008; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder> diffReportEntriesBuilder_; /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public java.util.List getDiffReportEntriesList() { if (diffReportEntriesBuilder_ == null) { return java.util.Collections.unmodifiableList(diffReportEntries_); } else { return diffReportEntriesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public int getDiffReportEntriesCount() { if (diffReportEntriesBuilder_ == null) { return diffReportEntries_.size(); } else { return diffReportEntriesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index) { if (diffReportEntriesBuilder_ == null) { return diffReportEntries_.get(index); } else { return diffReportEntriesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder setDiffReportEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) { if (diffReportEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDiffReportEntriesIsMutable(); diffReportEntries_.set(index, value); onChanged(); } else { diffReportEntriesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder setDiffReportEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) { if (diffReportEntriesBuilder_ 
== null) { ensureDiffReportEntriesIsMutable(); diffReportEntries_.set(index, builderForValue.build()); onChanged(); } else { diffReportEntriesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addDiffReportEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) { if (diffReportEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDiffReportEntriesIsMutable(); diffReportEntries_.add(value); onChanged(); } else { diffReportEntriesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addDiffReportEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) { if (diffReportEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDiffReportEntriesIsMutable(); diffReportEntries_.add(index, value); onChanged(); } else { diffReportEntriesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addDiffReportEntries( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) { if (diffReportEntriesBuilder_ == null) { ensureDiffReportEntriesIsMutable(); diffReportEntries_.add(builderForValue.build()); onChanged(); } else { diffReportEntriesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addDiffReportEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) { if (diffReportEntriesBuilder_ == null) { ensureDiffReportEntriesIsMutable(); diffReportEntries_.add(index, builderForValue.build()); onChanged(); } else { diffReportEntriesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addAllDiffReportEntries( java.lang.Iterable values) { if (diffReportEntriesBuilder_ == null) { ensureDiffReportEntriesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, diffReportEntries_); onChanged(); } else { diffReportEntriesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder clearDiffReportEntries() { if (diffReportEntriesBuilder_ == null) { diffReportEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); onChanged(); } else { diffReportEntriesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder removeDiffReportEntries(int index) { if (diffReportEntriesBuilder_ == null) { ensureDiffReportEntriesIsMutable(); diffReportEntries_.remove(index); onChanged(); } else { diffReportEntriesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder getDiffReportEntriesBuilder( int index) { return getDiffReportEntriesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder( int index) { if (diffReportEntriesBuilder_ == null) { return diffReportEntries_.get(index); } else { return diffReportEntriesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public java.util.List getDiffReportEntriesOrBuilderList() { if (diffReportEntriesBuilder_ != null) { return diffReportEntriesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(diffReportEntries_); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder addDiffReportEntriesBuilder() { return getDiffReportEntriesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder addDiffReportEntriesBuilder( int index) { return getDiffReportEntriesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public java.util.List getDiffReportEntriesBuilderList() { return getDiffReportEntriesFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder> getDiffReportEntriesFieldBuilder() { if (diffReportEntriesBuilder_ == null) { diffReportEntriesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder>( diffReportEntries_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); diffReportEntries_ = null; } return diffReportEntriesBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public 
SnapshotDiffReportProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotDiffReportListingEntryProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportListingEntryProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required bytes fullpath = 1; * @return Whether the fullpath field is set. */ boolean hasFullpath(); /** * required bytes fullpath = 1; * @return The fullpath. */ org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath(); /** * required uint64 dirId = 2; * @return Whether the dirId field is set. */ boolean hasDirId(); /** * required uint64 dirId = 2; * @return The dirId. */ long getDirId(); /** * required bool isReference = 3; * @return Whether the isReference field is set. */ boolean hasIsReference(); /** * required bool isReference = 3; * @return The isReference. */ boolean getIsReference(); /** * optional bytes targetPath = 4; * @return Whether the targetPath field is set. */ boolean hasTargetPath(); /** * optional bytes targetPath = 4; * @return The targetPath. */ org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath(); /** * optional uint64 fileId = 5; * @return Whether the fileId field is set. */ boolean hasFileId(); /** * optional uint64 fileId = 5; * @return The fileId. */ long getFileId(); } /** *
   **
   * Snapshot diff report listing entry
   * 
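    * Usage sketch (illustrative, not part of the generated source): populating the three
    * required fields of an entry; the path and id values below are hypothetical.
    *
    *   HdfsProtos.SnapshotDiffReportListingEntryProto entry =
    *       HdfsProtos.SnapshotDiffReportListingEntryProto.newBuilder()
    *           .setFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("dir/file"))
    *           .setDirId(16385L)       // required uint64 dirId = 2
    *           .setIsReference(false)  // required bool isReference = 3
    *           .build();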
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportListingEntryProto} */ public static final class SnapshotDiffReportListingEntryProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportListingEntryProto) SnapshotDiffReportListingEntryProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotDiffReportListingEntryProto.newBuilder() to construct. private SnapshotDiffReportListingEntryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotDiffReportListingEntryProto() { fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new SnapshotDiffReportListingEntryProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder.class); } private int bitField0_; public static final int FULLPATH_FIELD_NUMBER = 1; private org.apache.hadoop.thirdparty.protobuf.ByteString fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes fullpath = 1; * @return Whether the fullpath field is set. */ @java.lang.Override public boolean hasFullpath() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes fullpath = 1; * @return The fullpath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath() { return fullpath_; } public static final int DIRID_FIELD_NUMBER = 2; private long dirId_ = 0L; /** * required uint64 dirId = 2; * @return Whether the dirId field is set. */ @java.lang.Override public boolean hasDirId() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 dirId = 2; * @return The dirId. */ @java.lang.Override public long getDirId() { return dirId_; } public static final int ISREFERENCE_FIELD_NUMBER = 3; private boolean isReference_ = false; /** * required bool isReference = 3; * @return Whether the isReference field is set. */ @java.lang.Override public boolean hasIsReference() { return ((bitField0_ & 0x00000004) != 0); } /** * required bool isReference = 3; * @return The isReference. */ @java.lang.Override public boolean getIsReference() { return isReference_; } public static final int TARGETPATH_FIELD_NUMBER = 4; private org.apache.hadoop.thirdparty.protobuf.ByteString targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes targetPath = 4; * @return Whether the targetPath field is set. 
*/ @java.lang.Override public boolean hasTargetPath() { return ((bitField0_ & 0x00000008) != 0); } /** * optional bytes targetPath = 4; * @return The targetPath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath() { return targetPath_; } public static final int FILEID_FIELD_NUMBER = 5; private long fileId_ = 0L; /** * optional uint64 fileId = 5; * @return Whether the fileId field is set. */ @java.lang.Override public boolean hasFileId() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 fileId = 5; * @return The fileId. */ @java.lang.Override public long getFileId() { return fileId_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasFullpath()) { memoizedIsInitialized = 0; return false; } if (!hasDirId()) { memoizedIsInitialized = 0; return false; } if (!hasIsReference()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeBytes(1, fullpath_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, dirId_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBool(3, isReference_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBytes(4, targetPath_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, fileId_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(1, fullpath_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, dirId_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(3, isReference_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(4, targetPath_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, fileId_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto) obj; if (hasFullpath() != other.hasFullpath()) return false; if (hasFullpath()) { if (!getFullpath() .equals(other.getFullpath())) return false; } if (hasDirId() != other.hasDirId()) return false; if (hasDirId()) { if (getDirId() != other.getDirId()) return false; } if (hasIsReference() != other.hasIsReference()) return false; if (hasIsReference()) { if (getIsReference() != other.getIsReference()) return false; } if (hasTargetPath() != other.hasTargetPath()) return false; if (hasTargetPath()) { if (!getTargetPath() .equals(other.getTargetPath())) return false; } if 
(hasFileId() != other.hasFileId()) return false; if (hasFileId()) { if (getFileId() != other.getFileId()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasFullpath()) { hash = (37 * hash) + FULLPATH_FIELD_NUMBER; hash = (53 * hash) + getFullpath().hashCode(); } if (hasDirId()) { hash = (37 * hash) + DIRID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getDirId()); } if (hasIsReference()) { hash = (37 * hash) + ISREFERENCE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getIsReference()); } if (hasTargetPath()) { hash = (37 * hash) + TARGETPATH_FIELD_NUMBER; hash = (53 * hash) + getTargetPath().hashCode(); } if (hasFileId()) { hash = (37 * hash) + FILEID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFileId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Snapshot diff report listing entry
     * 
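      * Round-trip sketch (illustrative, not part of the generated source): serializing a fully
      * built entry and re-parsing it; "entry" here is a hypothetical, initialized message.
      *
      *   byte[] bytes = entry.toByteArray();
      *   HdfsProtos.SnapshotDiffReportListingEntryProto parsed =
      *       HdfsProtos.SnapshotDiffReportListingEntryProto.parseFrom(bytes);
      *   // parseFrom throws InvalidProtocolBufferException on malformed input or
      *   // when a required field (fullpath, dirId, isReference) is missing.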
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportListingEntryProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportListingEntryProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; dirId_ = 0L; isReference_ = false; targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; fileId_ = 0L; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.fullpath_ = fullpath_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.dirId_ = dirId_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.isReference_ = isReference_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.targetPath_ = targetPath_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { 
result.fileId_ = fileId_; to_bitField0_ |= 0x00000010; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()) return this; if (other.hasFullpath()) { setFullpath(other.getFullpath()); } if (other.hasDirId()) { setDirId(other.getDirId()); } if (other.hasIsReference()) { setIsReference(other.getIsReference()); } if (other.hasTargetPath()) { setTargetPath(other.getTargetPath()); } if (other.hasFileId()) { setFileId(other.getFileId()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasFullpath()) { return false; } if (!hasDirId()) { return false; } if (!hasIsReference()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { fullpath_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 16: { dirId_ = input.readUInt64(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { isReference_ = input.readBool(); bitField0_ |= 0x00000004; break; } // case 24 case 34: { targetPath_ = input.readBytes(); bitField0_ |= 0x00000008; break; } // case 34 case 40: { fileId_ = input.readUInt64(); bitField0_ |= 0x00000010; break; } // case 40 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.ByteString fullpath_ 
= org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes fullpath = 1; * @return Whether the fullpath field is set. */ @java.lang.Override public boolean hasFullpath() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes fullpath = 1; * @return The fullpath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath() { return fullpath_; } /** * required bytes fullpath = 1; * @param value The fullpath to set. * @return This builder for chaining. */ public Builder setFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } fullpath_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required bytes fullpath = 1; * @return This builder for chaining. */ public Builder clearFullpath() { bitField0_ = (bitField0_ & ~0x00000001); fullpath_ = getDefaultInstance().getFullpath(); onChanged(); return this; } private long dirId_ ; /** * required uint64 dirId = 2; * @return Whether the dirId field is set. */ @java.lang.Override public boolean hasDirId() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 dirId = 2; * @return The dirId. */ @java.lang.Override public long getDirId() { return dirId_; } /** * required uint64 dirId = 2; * @param value The dirId to set. * @return This builder for chaining. */ public Builder setDirId(long value) { dirId_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required uint64 dirId = 2; * @return This builder for chaining. */ public Builder clearDirId() { bitField0_ = (bitField0_ & ~0x00000002); dirId_ = 0L; onChanged(); return this; } private boolean isReference_ ; /** * required bool isReference = 3; * @return Whether the isReference field is set. */ @java.lang.Override public boolean hasIsReference() { return ((bitField0_ & 0x00000004) != 0); } /** * required bool isReference = 3; * @return The isReference. */ @java.lang.Override public boolean getIsReference() { return isReference_; } /** * required bool isReference = 3; * @param value The isReference to set. * @return This builder for chaining. */ public Builder setIsReference(boolean value) { isReference_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required bool isReference = 3; * @return This builder for chaining. */ public Builder clearIsReference() { bitField0_ = (bitField0_ & ~0x00000004); isReference_ = false; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes targetPath = 4; * @return Whether the targetPath field is set. */ @java.lang.Override public boolean hasTargetPath() { return ((bitField0_ & 0x00000008) != 0); } /** * optional bytes targetPath = 4; * @return The targetPath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath() { return targetPath_; } /** * optional bytes targetPath = 4; * @param value The targetPath to set. * @return This builder for chaining. */ public Builder setTargetPath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } targetPath_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * optional bytes targetPath = 4; * @return This builder for chaining. 
*/ public Builder clearTargetPath() { bitField0_ = (bitField0_ & ~0x00000008); targetPath_ = getDefaultInstance().getTargetPath(); onChanged(); return this; } private long fileId_ ; /** * optional uint64 fileId = 5; * @return Whether the fileId field is set. */ @java.lang.Override public boolean hasFileId() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 fileId = 5; * @return The fileId. */ @java.lang.Override public long getFileId() { return fileId_; } /** * optional uint64 fileId = 5; * @param value The fileId to set. * @return This builder for chaining. */ public Builder setFileId(long value) { fileId_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional uint64 fileId = 5; * @return This builder for chaining. */ public Builder clearFileId() { bitField0_ = (bitField0_ & ~0x00000010); fileId_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportListingEntryProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportListingEntryProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotDiffReportListingEntryProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotDiffReportCursorProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportCursorProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required bytes startPath = 1; * @return Whether the startPath field is set. 
*/ boolean hasStartPath(); /** * required bytes startPath = 1; * @return The startPath. */ org.apache.hadoop.thirdparty.protobuf.ByteString getStartPath(); /** * required int32 index = 2 [default = -1]; * @return Whether the index field is set. */ boolean hasIndex(); /** * required int32 index = 2 [default = -1]; * @return The index. */ int getIndex(); } /** * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportCursorProto} */ public static final class SnapshotDiffReportCursorProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportCursorProto) SnapshotDiffReportCursorProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotDiffReportCursorProto.newBuilder() to construct. private SnapshotDiffReportCursorProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotDiffReportCursorProto() { startPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; index_ = -1; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new SnapshotDiffReportCursorProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder.class); } private int bitField0_; public static final int STARTPATH_FIELD_NUMBER = 1; private org.apache.hadoop.thirdparty.protobuf.ByteString startPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes startPath = 1; * @return Whether the startPath field is set. */ @java.lang.Override public boolean hasStartPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes startPath = 1; * @return The startPath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getStartPath() { return startPath_; } public static final int INDEX_FIELD_NUMBER = 2; private int index_ = -1; /** * required int32 index = 2 [default = -1]; * @return Whether the index field is set. */ @java.lang.Override public boolean hasIndex() { return ((bitField0_ & 0x00000002) != 0); } /** * required int32 index = 2 [default = -1]; * @return The index. 
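     *
     * Usage sketch (illustrative, not part of the generated source): a cursor presumably used
     * to resume a paged snapshot diff listing; the startPath bytes are hypothetical and the
     * index default of -1 comes from the field declaration above.
     *
     *   HdfsProtos.SnapshotDiffReportCursorProto cursor =
     *       HdfsProtos.SnapshotDiffReportCursorProto.newBuilder()
     *           .setStartPath(org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY)
     *           .setIndex(-1)  // required int32 index = 2 [default = -1]
     *           .build();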
*/ @java.lang.Override public int getIndex() { return index_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasStartPath()) { memoizedIsInitialized = 0; return false; } if (!hasIndex()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeBytes(1, startPath_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeInt32(2, index_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(1, startPath_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt32Size(2, index_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto) obj; if (hasStartPath() != other.hasStartPath()) return false; if (hasStartPath()) { if (!getStartPath() .equals(other.getStartPath())) return false; } if (hasIndex() != other.hasIndex()) return false; if (hasIndex()) { if (getIndex() != other.getIndex()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasStartPath()) { hash = (37 * hash) + STARTPATH_FIELD_NUMBER; hash = (53 * hash) + getStartPath().hashCode(); } if (hasIndex()) { hash = (37 * hash) + INDEX_FIELD_NUMBER; hash = (53 * hash) + getIndex(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite 
extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportCursorProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportCursorProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; startPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; index_ = -1; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.startPath_ = startPath_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.index_ = index_; to_bitField0_ |= 0x00000002; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, 
java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance()) return this; if (other.hasStartPath()) { setStartPath(other.getStartPath()); } if (other.hasIndex()) { setIndex(other.getIndex()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasStartPath()) { return false; } if (!hasIndex()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { startPath_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 16: { index_ = input.readInt32(); bitField0_ |= 0x00000002; break; } // case 16 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.ByteString startPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes startPath = 1; * @return Whether the startPath field is set. */ @java.lang.Override public boolean hasStartPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes startPath = 1; * @return The startPath. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getStartPath() { return startPath_; } /** * required bytes startPath = 1; * @param value The startPath to set. * @return This builder for chaining. 
*/ public Builder setStartPath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } startPath_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required bytes startPath = 1; * @return This builder for chaining. */ public Builder clearStartPath() { bitField0_ = (bitField0_ & ~0x00000001); startPath_ = getDefaultInstance().getStartPath(); onChanged(); return this; } private int index_ = -1; /** * required int32 index = 2 [default = -1]; * @return Whether the index field is set. */ @java.lang.Override public boolean hasIndex() { return ((bitField0_ & 0x00000002) != 0); } /** * required int32 index = 2 [default = -1]; * @return The index. */ @java.lang.Override public int getIndex() { return index_; } /** * required int32 index = 2 [default = -1]; * @param value The index to set. * @return This builder for chaining. */ public Builder setIndex(int value) { index_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required int32 index = 2 [default = -1]; * @return This builder for chaining. */ public Builder clearIndex() { bitField0_ = (bitField0_ & ~0x00000002); index_ = -1; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportCursorProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportCursorProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotDiffReportCursorProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public 
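/*
 * Illustrative usage sketch, not part of the protoc-generated source: a round trip
 * through the SnapshotDiffReportCursorProto API defined above. The path literal is a
 * made-up example value; a cursor of this shape is also what the optional cursor
 * field (field 5) of SnapshotDiffReportListingProto, defined below, carries.
 *
 *   org.apache.hadoop.thirdparty.protobuf.ByteString startPath =
 *       org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("/dir/sub");
 *   HdfsProtos.SnapshotDiffReportCursorProto cursor =
 *       HdfsProtos.SnapshotDiffReportCursorProto.newBuilder()
 *           .setStartPath(startPath) // required bytes startPath = 1
 *           .setIndex(0)             // required int32 index = 2 [default = -1]
 *           .build();                // throws if a required field is unset
 *   byte[] wire = cursor.toByteArray();
 *   HdfsProtos.SnapshotDiffReportCursorProto parsed =
 *       HdfsProtos.SnapshotDiffReportCursorProto.parseFrom(wire);
 */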
interface SnapshotDiffReportListingProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportListingProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ java.util.List getModifiedEntriesList(); /** *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getModifiedEntries(int index); /** *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ int getModifiedEntriesCount(); /** *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ java.util.List getModifiedEntriesOrBuilderList(); /** *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getModifiedEntriesOrBuilder( int index); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ java.util.List getCreatedEntriesList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getCreatedEntries(int index); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ int getCreatedEntriesCount(); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ java.util.List getCreatedEntriesOrBuilderList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getCreatedEntriesOrBuilder( int index); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ java.util.List getDeletedEntriesList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDeletedEntries(int index); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ int getDeletedEntriesCount(); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ java.util.List getDeletedEntriesOrBuilderList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getDeletedEntriesOrBuilder( int index); /** * required bool isFromEarlier = 4; * @return Whether the isFromEarlier field is set. */ boolean hasIsFromEarlier(); /** * required bool isFromEarlier = 4; * @return The isFromEarlier. */ boolean getIsFromEarlier(); /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; * @return Whether the cursor field is set. */ boolean hasCursor(); /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; * @return The cursor. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getCursor(); /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder getCursorOrBuilder(); } /** *
   * <pre>
   **
   * Snapshot diff report listing
   * </pre>
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportListingProto} */ public static final class SnapshotDiffReportListingProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportListingProto) SnapshotDiffReportListingProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotDiffReportListingProto.newBuilder() to construct. private SnapshotDiffReportListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotDiffReportListingProto() { modifiedEntries_ = java.util.Collections.emptyList(); createdEntries_ = java.util.Collections.emptyList(); deletedEntries_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new SnapshotDiffReportListingProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.Builder.class); } private int bitField0_; public static final int MODIFIEDENTRIES_FIELD_NUMBER = 1; @SuppressWarnings("serial") private java.util.List modifiedEntries_; /** *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ @java.lang.Override public java.util.List getModifiedEntriesList() { return modifiedEntries_; } /** *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ @java.lang.Override public java.util.List getModifiedEntriesOrBuilderList() { return modifiedEntries_; } /** *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ @java.lang.Override public int getModifiedEntriesCount() { return modifiedEntries_.size(); } /** *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getModifiedEntries(int index) { return modifiedEntries_.get(index); } /** *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getModifiedEntriesOrBuilder( int index) { return modifiedEntries_.get(index); } public static final int CREATEDENTRIES_FIELD_NUMBER = 2; @SuppressWarnings("serial") private java.util.List createdEntries_; /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ @java.lang.Override public java.util.List getCreatedEntriesList() { return createdEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ @java.lang.Override public java.util.List getCreatedEntriesOrBuilderList() { return createdEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ @java.lang.Override public int getCreatedEntriesCount() { return createdEntries_.size(); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getCreatedEntries(int index) { return createdEntries_.get(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getCreatedEntriesOrBuilder( int index) { return createdEntries_.get(index); } public static final int DELETEDENTRIES_FIELD_NUMBER = 3; @SuppressWarnings("serial") private java.util.List deletedEntries_; /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ @java.lang.Override public java.util.List getDeletedEntriesList() { return deletedEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ @java.lang.Override public java.util.List getDeletedEntriesOrBuilderList() { return deletedEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ @java.lang.Override public int getDeletedEntriesCount() { return deletedEntries_.size(); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDeletedEntries(int index) { return deletedEntries_.get(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getDeletedEntriesOrBuilder( int index) { return deletedEntries_.get(index); } public static final int ISFROMEARLIER_FIELD_NUMBER = 4; private boolean isFromEarlier_ = false; /** * required bool isFromEarlier = 4; * @return Whether the isFromEarlier field is set. */ @java.lang.Override public boolean hasIsFromEarlier() { return ((bitField0_ & 0x00000001) != 0); } /** * required bool isFromEarlier = 4; * @return The isFromEarlier. */ @java.lang.Override public boolean getIsFromEarlier() { return isFromEarlier_; } public static final int CURSOR_FIELD_NUMBER = 5; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto cursor_; /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; * @return Whether the cursor field is set. 
*/ @java.lang.Override public boolean hasCursor() { return ((bitField0_ & 0x00000002) != 0); } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; * @return The cursor. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getCursor() { return cursor_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance() : cursor_; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder getCursorOrBuilder() { return cursor_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance() : cursor_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasIsFromEarlier()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getModifiedEntriesCount(); i++) { if (!getModifiedEntries(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } for (int i = 0; i < getCreatedEntriesCount(); i++) { if (!getCreatedEntries(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } for (int i = 0; i < getDeletedEntriesCount(); i++) { if (!getDeletedEntries(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasCursor()) { if (!getCursor().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < modifiedEntries_.size(); i++) { output.writeMessage(1, modifiedEntries_.get(i)); } for (int i = 0; i < createdEntries_.size(); i++) { output.writeMessage(2, createdEntries_.get(i)); } for (int i = 0; i < deletedEntries_.size(); i++) { output.writeMessage(3, deletedEntries_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { output.writeBool(4, isFromEarlier_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(5, getCursor()); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < modifiedEntries_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, modifiedEntries_.get(i)); } for (int i = 0; i < createdEntries_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(2, createdEntries_.get(i)); } for (int i = 0; i < deletedEntries_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(3, deletedEntries_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(4, isFromEarlier_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(5, getCursor()); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto)) { return super.equals(obj); } 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto) obj; if (!getModifiedEntriesList() .equals(other.getModifiedEntriesList())) return false; if (!getCreatedEntriesList() .equals(other.getCreatedEntriesList())) return false; if (!getDeletedEntriesList() .equals(other.getDeletedEntriesList())) return false; if (hasIsFromEarlier() != other.hasIsFromEarlier()) return false; if (hasIsFromEarlier()) { if (getIsFromEarlier() != other.getIsFromEarlier()) return false; } if (hasCursor() != other.hasCursor()) return false; if (hasCursor()) { if (!getCursor() .equals(other.getCursor())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getModifiedEntriesCount() > 0) { hash = (37 * hash) + MODIFIEDENTRIES_FIELD_NUMBER; hash = (53 * hash) + getModifiedEntriesList().hashCode(); } if (getCreatedEntriesCount() > 0) { hash = (37 * hash) + CREATEDENTRIES_FIELD_NUMBER; hash = (53 * hash) + getCreatedEntriesList().hashCode(); } if (getDeletedEntriesCount() > 0) { hash = (37 * hash) + DELETEDENTRIES_FIELD_NUMBER; hash = (53 * hash) + getDeletedEntriesList().hashCode(); } if (hasIsFromEarlier()) { hash = (37 * hash) + ISFROMEARLIER_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getIsFromEarlier()); } if (hasCursor()) { hash = (37 * hash) + CURSOR_FIELD_NUMBER; hash = (53 * hash) + getCursor().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, 
extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     * <pre>
     **
     * Snapshot diff report listing
     * </pre>
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportListingProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportListingProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getModifiedEntriesFieldBuilder(); getCreatedEntriesFieldBuilder(); getDeletedEntriesFieldBuilder(); getCursorFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; if (modifiedEntriesBuilder_ == null) { modifiedEntries_ = java.util.Collections.emptyList(); } else { modifiedEntries_ = null; modifiedEntriesBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); if (createdEntriesBuilder_ == null) { createdEntries_ = java.util.Collections.emptyList(); } else { createdEntries_ = null; createdEntriesBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); if (deletedEntriesBuilder_ == null) { deletedEntries_ = java.util.Collections.emptyList(); } else { deletedEntries_ = null; deletedEntriesBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); isFromEarlier_ = false; cursor_ = null; if (cursorBuilder_ != null) { cursorBuilder_.dispose(); cursorBuilder_ = null; } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto result = new 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto result) { if (modifiedEntriesBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { modifiedEntries_ = java.util.Collections.unmodifiableList(modifiedEntries_); bitField0_ = (bitField0_ & ~0x00000001); } result.modifiedEntries_ = modifiedEntries_; } else { result.modifiedEntries_ = modifiedEntriesBuilder_.build(); } if (createdEntriesBuilder_ == null) { if (((bitField0_ & 0x00000002) != 0)) { createdEntries_ = java.util.Collections.unmodifiableList(createdEntries_); bitField0_ = (bitField0_ & ~0x00000002); } result.createdEntries_ = createdEntries_; } else { result.createdEntries_ = createdEntriesBuilder_.build(); } if (deletedEntriesBuilder_ == null) { if (((bitField0_ & 0x00000004) != 0)) { deletedEntries_ = java.util.Collections.unmodifiableList(deletedEntries_); bitField0_ = (bitField0_ & ~0x00000004); } result.deletedEntries_ = deletedEntries_; } else { result.deletedEntries_ = deletedEntriesBuilder_.build(); } } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000008) != 0)) { result.isFromEarlier_ = isFromEarlier_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000010) != 0)) { result.cursor_ = cursorBuilder_ == null ? cursor_ : cursorBuilder_.build(); to_bitField0_ |= 0x00000002; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.getDefaultInstance()) return this; if (modifiedEntriesBuilder_ == null) { if (!other.modifiedEntries_.isEmpty()) { if (modifiedEntries_.isEmpty()) { modifiedEntries_ = other.modifiedEntries_; bitField0_ = (bitField0_ & ~0x00000001); } else { 
ensureModifiedEntriesIsMutable(); modifiedEntries_.addAll(other.modifiedEntries_); } onChanged(); } } else { if (!other.modifiedEntries_.isEmpty()) { if (modifiedEntriesBuilder_.isEmpty()) { modifiedEntriesBuilder_.dispose(); modifiedEntriesBuilder_ = null; modifiedEntries_ = other.modifiedEntries_; bitField0_ = (bitField0_ & ~0x00000001); modifiedEntriesBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getModifiedEntriesFieldBuilder() : null; } else { modifiedEntriesBuilder_.addAllMessages(other.modifiedEntries_); } } } if (createdEntriesBuilder_ == null) { if (!other.createdEntries_.isEmpty()) { if (createdEntries_.isEmpty()) { createdEntries_ = other.createdEntries_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureCreatedEntriesIsMutable(); createdEntries_.addAll(other.createdEntries_); } onChanged(); } } else { if (!other.createdEntries_.isEmpty()) { if (createdEntriesBuilder_.isEmpty()) { createdEntriesBuilder_.dispose(); createdEntriesBuilder_ = null; createdEntries_ = other.createdEntries_; bitField0_ = (bitField0_ & ~0x00000002); createdEntriesBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getCreatedEntriesFieldBuilder() : null; } else { createdEntriesBuilder_.addAllMessages(other.createdEntries_); } } } if (deletedEntriesBuilder_ == null) { if (!other.deletedEntries_.isEmpty()) { if (deletedEntries_.isEmpty()) { deletedEntries_ = other.deletedEntries_; bitField0_ = (bitField0_ & ~0x00000004); } else { ensureDeletedEntriesIsMutable(); deletedEntries_.addAll(other.deletedEntries_); } onChanged(); } } else { if (!other.deletedEntries_.isEmpty()) { if (deletedEntriesBuilder_.isEmpty()) { deletedEntriesBuilder_.dispose(); deletedEntriesBuilder_ = null; deletedEntries_ = other.deletedEntries_; bitField0_ = (bitField0_ & ~0x00000004); deletedEntriesBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getDeletedEntriesFieldBuilder() : null; } else { deletedEntriesBuilder_.addAllMessages(other.deletedEntries_); } } } if (other.hasIsFromEarlier()) { setIsFromEarlier(other.getIsFromEarlier()); } if (other.hasCursor()) { mergeCursor(other.getCursor()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasIsFromEarlier()) { return false; } for (int i = 0; i < getModifiedEntriesCount(); i++) { if (!getModifiedEntries(i).isInitialized()) { return false; } } for (int i = 0; i < getCreatedEntriesCount(); i++) { if (!getCreatedEntries(i).isInitialized()) { return false; } } for (int i = 0; i < getDeletedEntriesCount(); i++) { if (!getDeletedEntries(i).isInitialized()) { return false; } } if (hasCursor()) { if (!getCursor().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.PARSER, extensionRegistry); if (modifiedEntriesBuilder_ == null) { ensureModifiedEntriesIsMutable(); modifiedEntries_.add(m); } else { modifiedEntriesBuilder_.addMessage(m); } break; } // case 10 case 18: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.PARSER, extensionRegistry); if (createdEntriesBuilder_ == null) { ensureCreatedEntriesIsMutable(); createdEntries_.add(m); } else { createdEntriesBuilder_.addMessage(m); } break; } // case 18 case 26: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.PARSER, extensionRegistry); if (deletedEntriesBuilder_ == null) { ensureDeletedEntriesIsMutable(); deletedEntries_.add(m); } else { deletedEntriesBuilder_.addMessage(m); } break; } // case 26 case 32: { isFromEarlier_ = input.readBool(); bitField0_ |= 0x00000008; break; } // case 32 case 42: { input.readMessage( getCursorFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000010; break; } // case 42 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.util.List modifiedEntries_ = java.util.Collections.emptyList(); private void ensureModifiedEntriesIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { modifiedEntries_ = new java.util.ArrayList(modifiedEntries_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> modifiedEntriesBuilder_; /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public java.util.List getModifiedEntriesList() { if (modifiedEntriesBuilder_ == null) { return java.util.Collections.unmodifiableList(modifiedEntries_); } else { return modifiedEntriesBuilder_.getMessageList(); } } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public int getModifiedEntriesCount() { if (modifiedEntriesBuilder_ == null) { return modifiedEntries_.size(); } else { return modifiedEntriesBuilder_.getCount(); } } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getModifiedEntries(int index) { if (modifiedEntriesBuilder_ == null) { return modifiedEntries_.get(index); } else { return modifiedEntriesBuilder_.getMessage(index); } } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder setModifiedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (modifiedEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureModifiedEntriesIsMutable(); modifiedEntries_.set(index, value); onChanged(); } else { modifiedEntriesBuilder_.setMessage(index, value); } return this; } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder setModifiedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (modifiedEntriesBuilder_ == null) { ensureModifiedEntriesIsMutable(); modifiedEntries_.set(index, builderForValue.build()); onChanged(); } else { modifiedEntriesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder addModifiedEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (modifiedEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureModifiedEntriesIsMutable(); modifiedEntries_.add(value); onChanged(); } else { modifiedEntriesBuilder_.addMessage(value); } return this; } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder addModifiedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (modifiedEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureModifiedEntriesIsMutable(); modifiedEntries_.add(index, value); onChanged(); } else { modifiedEntriesBuilder_.addMessage(index, value); } return this; } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder addModifiedEntries( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (modifiedEntriesBuilder_ == null) { ensureModifiedEntriesIsMutable(); modifiedEntries_.add(builderForValue.build()); onChanged(); } else { modifiedEntriesBuilder_.addMessage(builderForValue.build()); } return this; } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder addModifiedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (modifiedEntriesBuilder_ == null) { ensureModifiedEntriesIsMutable(); modifiedEntries_.add(index, builderForValue.build()); onChanged(); } else { modifiedEntriesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder addAllModifiedEntries( java.lang.Iterable values) { if (modifiedEntriesBuilder_ == null) { ensureModifiedEntriesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, modifiedEntries_); onChanged(); } else { modifiedEntriesBuilder_.addAllMessages(values); } return this; } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder clearModifiedEntries() { if (modifiedEntriesBuilder_ == null) { modifiedEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { modifiedEntriesBuilder_.clear(); } return this; } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder removeModifiedEntries(int index) { if (modifiedEntriesBuilder_ == null) { ensureModifiedEntriesIsMutable(); modifiedEntries_.remove(index); onChanged(); } else { modifiedEntriesBuilder_.remove(index); } return this; } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder getModifiedEntriesBuilder( int index) { return getModifiedEntriesFieldBuilder().getBuilder(index); } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getModifiedEntriesOrBuilder( int index) { if (modifiedEntriesBuilder_ == null) { return modifiedEntries_.get(index); } else { return modifiedEntriesBuilder_.getMessageOrBuilder(index); } } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public java.util.List getModifiedEntriesOrBuilderList() { if (modifiedEntriesBuilder_ != null) { return modifiedEntriesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(modifiedEntries_); } } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addModifiedEntriesBuilder() { return getModifiedEntriesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()); } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addModifiedEntriesBuilder( int index) { return getModifiedEntriesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()); } /** *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public java.util.List getModifiedEntriesBuilderList() { return getModifiedEntriesFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> getModifiedEntriesFieldBuilder() { if (modifiedEntriesBuilder_ == null) { modifiedEntriesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder>( modifiedEntries_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); modifiedEntries_ = null; } return modifiedEntriesBuilder_; } private java.util.List createdEntries_ = java.util.Collections.emptyList(); private void ensureCreatedEntriesIsMutable() { if (!((bitField0_ & 0x00000002) != 0)) { createdEntries_ = new java.util.ArrayList(createdEntries_); bitField0_ |= 0x00000002; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> createdEntriesBuilder_; /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public java.util.List getCreatedEntriesList() { if (createdEntriesBuilder_ == null) { return java.util.Collections.unmodifiableList(createdEntries_); } else { return createdEntriesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public int getCreatedEntriesCount() { if (createdEntriesBuilder_ == null) { return createdEntries_.size(); } else { return createdEntriesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getCreatedEntries(int index) { if (createdEntriesBuilder_ == null) { return createdEntries_.get(index); } else { return createdEntriesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder setCreatedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (createdEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureCreatedEntriesIsMutable(); createdEntries_.set(index, value); onChanged(); } else { createdEntriesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder setCreatedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (createdEntriesBuilder_ == null) { ensureCreatedEntriesIsMutable(); createdEntries_.set(index, builderForValue.build()); onChanged(); } else { 
createdEntriesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder addCreatedEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (createdEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureCreatedEntriesIsMutable(); createdEntries_.add(value); onChanged(); } else { createdEntriesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder addCreatedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (createdEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureCreatedEntriesIsMutable(); createdEntries_.add(index, value); onChanged(); } else { createdEntriesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder addCreatedEntries( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (createdEntriesBuilder_ == null) { ensureCreatedEntriesIsMutable(); createdEntries_.add(builderForValue.build()); onChanged(); } else { createdEntriesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder addCreatedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (createdEntriesBuilder_ == null) { ensureCreatedEntriesIsMutable(); createdEntries_.add(index, builderForValue.build()); onChanged(); } else { createdEntriesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder addAllCreatedEntries( java.lang.Iterable values) { if (createdEntriesBuilder_ == null) { ensureCreatedEntriesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, createdEntries_); onChanged(); } else { createdEntriesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder clearCreatedEntries() { if (createdEntriesBuilder_ == null) { createdEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); } else { createdEntriesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder removeCreatedEntries(int index) { if (createdEntriesBuilder_ == null) { ensureCreatedEntriesIsMutable(); createdEntries_.remove(index); onChanged(); } else { createdEntriesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder getCreatedEntriesBuilder( int index) { return getCreatedEntriesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getCreatedEntriesOrBuilder( int index) { if (createdEntriesBuilder_ == null) { return 
createdEntries_.get(index); } else { return createdEntriesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public java.util.List getCreatedEntriesOrBuilderList() { if (createdEntriesBuilder_ != null) { return createdEntriesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(createdEntries_); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addCreatedEntriesBuilder() { return getCreatedEntriesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addCreatedEntriesBuilder( int index) { return getCreatedEntriesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public java.util.List getCreatedEntriesBuilderList() { return getCreatedEntriesFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> getCreatedEntriesFieldBuilder() { if (createdEntriesBuilder_ == null) { createdEntriesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder>( createdEntries_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); createdEntries_ = null; } return createdEntriesBuilder_; } private java.util.List deletedEntries_ = java.util.Collections.emptyList(); private void ensureDeletedEntriesIsMutable() { if (!((bitField0_ & 0x00000004) != 0)) { deletedEntries_ = new java.util.ArrayList(deletedEntries_); bitField0_ |= 0x00000004; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> deletedEntriesBuilder_; /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public java.util.List getDeletedEntriesList() { if (deletedEntriesBuilder_ == null) { return java.util.Collections.unmodifiableList(deletedEntries_); } else { return deletedEntriesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public int getDeletedEntriesCount() { if (deletedEntriesBuilder_ == null) { return deletedEntries_.size(); } else { return deletedEntriesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto 
deletedEntries = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDeletedEntries(int index) { if (deletedEntriesBuilder_ == null) { return deletedEntries_.get(index); } else { return deletedEntriesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder setDeletedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (deletedEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDeletedEntriesIsMutable(); deletedEntries_.set(index, value); onChanged(); } else { deletedEntriesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder setDeletedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (deletedEntriesBuilder_ == null) { ensureDeletedEntriesIsMutable(); deletedEntries_.set(index, builderForValue.build()); onChanged(); } else { deletedEntriesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder addDeletedEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (deletedEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDeletedEntriesIsMutable(); deletedEntries_.add(value); onChanged(); } else { deletedEntriesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder addDeletedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (deletedEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDeletedEntriesIsMutable(); deletedEntries_.add(index, value); onChanged(); } else { deletedEntriesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder addDeletedEntries( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (deletedEntriesBuilder_ == null) { ensureDeletedEntriesIsMutable(); deletedEntries_.add(builderForValue.build()); onChanged(); } else { deletedEntriesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder addDeletedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (deletedEntriesBuilder_ == null) { ensureDeletedEntriesIsMutable(); deletedEntries_.add(index, builderForValue.build()); onChanged(); } else { deletedEntriesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder addAllDeletedEntries( java.lang.Iterable values) { if (deletedEntriesBuilder_ == null) { ensureDeletedEntriesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, deletedEntries_); onChanged(); } else { deletedEntriesBuilder_.addAllMessages(values); } return this; } /** * repeated 
.hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder clearDeletedEntries() { if (deletedEntriesBuilder_ == null) { deletedEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); } else { deletedEntriesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder removeDeletedEntries(int index) { if (deletedEntriesBuilder_ == null) { ensureDeletedEntriesIsMutable(); deletedEntries_.remove(index); onChanged(); } else { deletedEntriesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder getDeletedEntriesBuilder( int index) { return getDeletedEntriesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getDeletedEntriesOrBuilder( int index) { if (deletedEntriesBuilder_ == null) { return deletedEntries_.get(index); } else { return deletedEntriesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public java.util.List getDeletedEntriesOrBuilderList() { if (deletedEntriesBuilder_ != null) { return deletedEntriesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(deletedEntries_); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addDeletedEntriesBuilder() { return getDeletedEntriesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addDeletedEntriesBuilder( int index) { return getDeletedEntriesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public java.util.List getDeletedEntriesBuilderList() { return getDeletedEntriesFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> getDeletedEntriesFieldBuilder() { if (deletedEntriesBuilder_ == null) { deletedEntriesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder>( deletedEntries_, ((bitField0_ & 0x00000004) != 0), getParentForChildren(), isClean()); deletedEntries_ = null; } return deletedEntriesBuilder_; } private boolean isFromEarlier_ ; /** * required 
bool isFromEarlier = 4; * @return Whether the isFromEarlier field is set. */ @java.lang.Override public boolean hasIsFromEarlier() { return ((bitField0_ & 0x00000008) != 0); } /** * required bool isFromEarlier = 4; * @return The isFromEarlier. */ @java.lang.Override public boolean getIsFromEarlier() { return isFromEarlier_; } /** * required bool isFromEarlier = 4; * @param value The isFromEarlier to set. * @return This builder for chaining. */ public Builder setIsFromEarlier(boolean value) { isFromEarlier_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * required bool isFromEarlier = 4; * @return This builder for chaining. */ public Builder clearIsFromEarlier() { bitField0_ = (bitField0_ & ~0x00000008); isFromEarlier_ = false; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto cursor_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder> cursorBuilder_; /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; * @return Whether the cursor field is set. */ public boolean hasCursor() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; * @return The cursor. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getCursor() { if (cursorBuilder_ == null) { return cursor_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance() : cursor_; } else { return cursorBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public Builder setCursor(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto value) { if (cursorBuilder_ == null) { if (value == null) { throw new NullPointerException(); } cursor_ = value; } else { cursorBuilder_.setMessage(value); } bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public Builder setCursor( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder builderForValue) { if (cursorBuilder_ == null) { cursor_ = builderForValue.build(); } else { cursorBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public Builder mergeCursor(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto value) { if (cursorBuilder_ == null) { if (((bitField0_ & 0x00000010) != 0) && cursor_ != null && cursor_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance()) { getCursorBuilder().mergeFrom(value); } else { cursor_ = value; } } else { cursorBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public Builder clearCursor() { bitField0_ = (bitField0_ & ~0x00000010); cursor_ = null; if (cursorBuilder_ != null) { cursorBuilder_.dispose(); cursorBuilder_ = null; } onChanged(); return this; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder getCursorBuilder() { bitField0_ |= 0x00000010; onChanged(); return getCursorFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder getCursorOrBuilder() { if (cursorBuilder_ != null) { return cursorBuilder_.getMessageOrBuilder(); } else { return cursor_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance() : cursor_; } } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder> getCursorFieldBuilder() { if (cursorBuilder_ == null) { cursorBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder>( getCursor(), getParentForChildren(), isClean()); cursor_ = null; } return cursorBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportListingProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportListingProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotDiffReportListingProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } 
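    /*
     * A minimal usage sketch of the builder API documented above; "entry" and
     * "cursor" stand for already-built SnapshotDiffReportListingEntryProto and
     * SnapshotDiffReportCursorProto instances (their own fields are not shown
     * here), and the combination of calls is illustrative, not prescriptive.
     *
     *   HdfsProtos.SnapshotDiffReportListingProto report =
     *       HdfsProtos.SnapshotDiffReportListingProto.newBuilder()
     *           .addModifiedEntries(entry)   // repeated field 1
     *           .addCreatedEntries(entry)    // repeated field 2
     *           .addDeletedEntries(entry)    // repeated field 3
     *           .setIsFromEarlier(true)      // required field 4
     *           .setCursor(cursor)           // optional field 5
     *           .build();                    // throws if a required field is unset
     */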
@java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface BlockProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint64 blockId = 1; * @return Whether the blockId field is set. */ boolean hasBlockId(); /** * required uint64 blockId = 1; * @return The blockId. */ long getBlockId(); /** * required uint64 genStamp = 2; * @return Whether the genStamp field is set. */ boolean hasGenStamp(); /** * required uint64 genStamp = 2; * @return The genStamp. */ long getGenStamp(); /** * optional uint64 numBytes = 3 [default = 0]; * @return Whether the numBytes field is set. */ boolean hasNumBytes(); /** * optional uint64 numBytes = 3 [default = 0]; * @return The numBytes. */ long getNumBytes(); } /** *
   **
   * Block information
   * Please be wary of adding additional fields here, since INodeFiles
   * need to fit in PB's default max message size of 64MB.
   * We restrict the max # of blocks per file
   * (dfs.namenode.fs-limits.max-blocks-per-file), but it's better
   * to avoid changing this.
   * 
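   *
   * A minimal usage sketch of the generated builder and parser for this message;
   * the numeric values are illustrative only:
   *
   *   HdfsProtos.BlockProto blk = HdfsProtos.BlockProto.newBuilder()
   *       .setBlockId(1073741825L)    // required
   *       .setGenStamp(1001L)         // required
   *       .setNumBytes(134217728L)    // optional, defaults to 0
   *       .build();
   *   byte[] wire = blk.toByteArray();
   *   HdfsProtos.BlockProto parsed =
   *       HdfsProtos.BlockProto.parseFrom(wire);   // may throw InvalidProtocolBufferException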
* * Protobuf type {@code hadoop.hdfs.BlockProto} */ public static final class BlockProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockProto) BlockProtoOrBuilder { private static final long serialVersionUID = 0L; // Use BlockProto.newBuilder() to construct. private BlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private BlockProto() { } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new BlockProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder.class); } private int bitField0_; public static final int BLOCKID_FIELD_NUMBER = 1; private long blockId_ = 0L; /** * required uint64 blockId = 1; * @return Whether the blockId field is set. */ @java.lang.Override public boolean hasBlockId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 blockId = 1; * @return The blockId. */ @java.lang.Override public long getBlockId() { return blockId_; } public static final int GENSTAMP_FIELD_NUMBER = 2; private long genStamp_ = 0L; /** * required uint64 genStamp = 2; * @return Whether the genStamp field is set. */ @java.lang.Override public boolean hasGenStamp() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 genStamp = 2; * @return The genStamp. */ @java.lang.Override public long getGenStamp() { return genStamp_; } public static final int NUMBYTES_FIELD_NUMBER = 3; private long numBytes_ = 0L; /** * optional uint64 numBytes = 3 [default = 0]; * @return Whether the numBytes field is set. */ @java.lang.Override public boolean hasNumBytes() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 numBytes = 3 [default = 0]; * @return The numBytes. 
*/ @java.lang.Override public long getNumBytes() { return numBytes_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasBlockId()) { memoizedIsInitialized = 0; return false; } if (!hasGenStamp()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, blockId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, genStamp_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, numBytes_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, blockId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, genStamp_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, numBytes_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) obj; if (hasBlockId() != other.hasBlockId()) return false; if (hasBlockId()) { if (getBlockId() != other.getBlockId()) return false; } if (hasGenStamp() != other.hasGenStamp()) return false; if (hasGenStamp()) { if (getGenStamp() != other.getGenStamp()) return false; } if (hasNumBytes() != other.hasNumBytes()) return false; if (hasNumBytes()) { if (getNumBytes() != other.getNumBytes()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasBlockId()) { hash = (37 * hash) + BLOCKID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getBlockId()); } if (hasGenStamp()) { hash = (37 * hash) + GENSTAMP_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getGenStamp()); } if (hasNumBytes()) { hash = (37 * hash) + NUMBYTES_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNumBytes()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Block information
     * Please be wary of adding additional fields here, since INodeFiles
     * need to fit in PB's default max message size of 64MB.
     * We restrict the max # of blocks per file
     * (dfs.namenode.fs-limits.max-blocks-per-file), but it's better
     * to avoid changing this.
     * 
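      *
      * A minimal sketch of updating an existing BlockProto through this Builder;
      * "existing" stands for a previously built instance and the delta is illustrative:
      *
      *   HdfsProtos.BlockProto updated = existing.toBuilder()
      *       .setNumBytes(existing.getNumBytes() + 1024L)
      *       .build();
      *
      * mergeFrom(other) copies only the fields that are set on "other", so it can be
      * used to overlay a partial update onto this builder's current state.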
* * Protobuf type {@code hadoop.hdfs.BlockProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; blockId_ = 0L; genStamp_ = 0L; numBytes_ = 0L; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.blockId_ = blockId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.genStamp_ = genStamp_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.numBytes_ = numBytes_; to_bitField0_ |= 0x00000004; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int 
index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) return this; if (other.hasBlockId()) { setBlockId(other.getBlockId()); } if (other.hasGenStamp()) { setGenStamp(other.getGenStamp()); } if (other.hasNumBytes()) { setNumBytes(other.getNumBytes()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasBlockId()) { return false; } if (!hasGenStamp()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { blockId_ = input.readUInt64(); bitField0_ |= 0x00000001; break; } // case 8 case 16: { genStamp_ = input.readUInt64(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { numBytes_ = input.readUInt64(); bitField0_ |= 0x00000004; break; } // case 24 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private long blockId_ ; /** * required uint64 blockId = 1; * @return Whether the blockId field is set. */ @java.lang.Override public boolean hasBlockId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 blockId = 1; * @return The blockId. */ @java.lang.Override public long getBlockId() { return blockId_; } /** * required uint64 blockId = 1; * @param value The blockId to set. * @return This builder for chaining. */ public Builder setBlockId(long value) { blockId_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required uint64 blockId = 1; * @return This builder for chaining. */ public Builder clearBlockId() { bitField0_ = (bitField0_ & ~0x00000001); blockId_ = 0L; onChanged(); return this; } private long genStamp_ ; /** * required uint64 genStamp = 2; * @return Whether the genStamp field is set. */ @java.lang.Override public boolean hasGenStamp() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 genStamp = 2; * @return The genStamp. */ @java.lang.Override public long getGenStamp() { return genStamp_; } /** * required uint64 genStamp = 2; * @param value The genStamp to set. * @return This builder for chaining. 
*/ public Builder setGenStamp(long value) { genStamp_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required uint64 genStamp = 2; * @return This builder for chaining. */ public Builder clearGenStamp() { bitField0_ = (bitField0_ & ~0x00000002); genStamp_ = 0L; onChanged(); return this; } private long numBytes_ ; /** * optional uint64 numBytes = 3 [default = 0]; * @return Whether the numBytes field is set. */ @java.lang.Override public boolean hasNumBytes() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 numBytes = 3 [default = 0]; * @return The numBytes. */ @java.lang.Override public long getNumBytes() { return numBytes_; } /** * optional uint64 numBytes = 3 [default = 0]; * @param value The numBytes to set. * @return This builder for chaining. */ public Builder setNumBytes(long value) { numBytes_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * optional uint64 numBytes = 3 [default = 0]; * @return This builder for chaining. */ public Builder clearNumBytes() { bitField0_ = (bitField0_ & ~0x00000004); numBytes_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public BlockProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string snapshotName = 1; * @return 
Whether the snapshotName field is set. */ boolean hasSnapshotName(); /** * required string snapshotName = 1; * @return The snapshotName. */ java.lang.String getSnapshotName(); /** * required string snapshotName = 1; * @return The bytes for snapshotName. */ org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotNameBytes(); /** * required string snapshotRoot = 2; * @return Whether the snapshotRoot field is set. */ boolean hasSnapshotRoot(); /** * required string snapshotRoot = 2; * @return The snapshotRoot. */ java.lang.String getSnapshotRoot(); /** * required string snapshotRoot = 2; * @return The bytes for snapshotRoot. */ org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotRootBytes(); /** * required .hadoop.hdfs.FsPermissionProto permission = 3; * @return Whether the permission field is set. */ boolean hasPermission(); /** * required .hadoop.hdfs.FsPermissionProto permission = 3; * @return The permission. */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission(); /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder(); /** * required string owner = 4; * @return Whether the owner field is set. */ boolean hasOwner(); /** * required string owner = 4; * @return The owner. */ java.lang.String getOwner(); /** * required string owner = 4; * @return The bytes for owner. */ org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes(); /** * required string group = 5; * @return Whether the group field is set. */ boolean hasGroup(); /** * required string group = 5; * @return The group. */ java.lang.String getGroup(); /** * required string group = 5; * @return The bytes for group. */ org.apache.hadoop.thirdparty.protobuf.ByteString getGroupBytes(); /** *
     * TODO: do we need access time?
     * 
     *
     * required string createTime = 6;
     * @return Whether the createTime field is set.
     */
    boolean hasCreateTime();
    /**
     * 
     * TODO: do we need access time?
     * 
     *
     * required string createTime = 6;
     * @return The createTime.
     */
    java.lang.String getCreateTime();
    /**
     * 
     * TODO: do we need access time?
     * 
     *
     * required string createTime = 6;
     * @return The bytes for createTime.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getCreateTimeBytes();
  }
  /**
   * 
   **
   * Information related to a snapshot
   * TODO: add more information
   * 
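   *
   * A minimal usage sketch; every field of this message is required, the string
   * values are illustrative, and "perm" stands for an already-built
   * AclProtos.FsPermissionProto (its contents are not shown here):
   *
   *   HdfsProtos.SnapshotInfoProto info =
   *       HdfsProtos.SnapshotInfoProto.newBuilder()
   *           .setSnapshotName("s0")
   *           .setSnapshotRoot("/data")
   *           .setPermission(perm)
   *           .setOwner("hdfs")
   *           .setGroup("supergroup")
   *           .setCreateTime("2024-01-01 00:00:00")   // kept as a plain string
   *           .build();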
* * Protobuf type {@code hadoop.hdfs.SnapshotInfoProto} */ public static final class SnapshotInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotInfoProto) SnapshotInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotInfoProto.newBuilder() to construct. private SnapshotInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotInfoProto() { snapshotName_ = ""; snapshotRoot_ = ""; owner_ = ""; group_ = ""; createTime_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new SnapshotInfoProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.Builder.class); } private int bitField0_; public static final int SNAPSHOTNAME_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object snapshotName_ = ""; /** * required string snapshotName = 1; * @return Whether the snapshotName field is set. */ @java.lang.Override public boolean hasSnapshotName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string snapshotName = 1; * @return The snapshotName. */ @java.lang.Override public java.lang.String getSnapshotName() { java.lang.Object ref = snapshotName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotName_ = s; } return s; } } /** * required string snapshotName = 1; * @return The bytes for snapshotName. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotNameBytes() { java.lang.Object ref = snapshotName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int SNAPSHOTROOT_FIELD_NUMBER = 2; @SuppressWarnings("serial") private volatile java.lang.Object snapshotRoot_ = ""; /** * required string snapshotRoot = 2; * @return Whether the snapshotRoot field is set. */ @java.lang.Override public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000002) != 0); } /** * required string snapshotRoot = 2; * @return The snapshotRoot. 
*/ @java.lang.Override public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } } /** * required string snapshotRoot = 2; * @return The bytes for snapshotRoot. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int PERMISSION_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_; /** * required .hadoop.hdfs.FsPermissionProto permission = 3; * @return Whether the permission field is set. */ @java.lang.Override public boolean hasPermission() { return ((bitField0_ & 0x00000004) != 0); } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; * @return The permission. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() { return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } public static final int OWNER_FIELD_NUMBER = 4; @SuppressWarnings("serial") private volatile java.lang.Object owner_ = ""; /** * required string owner = 4; * @return Whether the owner field is set. */ @java.lang.Override public boolean hasOwner() { return ((bitField0_ & 0x00000008) != 0); } /** * required string owner = 4; * @return The owner. */ @java.lang.Override public java.lang.String getOwner() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { owner_ = s; } return s; } } /** * required string owner = 4; * @return The bytes for owner. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int GROUP_FIELD_NUMBER = 5; @SuppressWarnings("serial") private volatile java.lang.Object group_ = ""; /** * required string group = 5; * @return Whether the group field is set. */ @java.lang.Override public boolean hasGroup() { return ((bitField0_ & 0x00000010) != 0); } /** * required string group = 5; * @return The group. 
*/ @java.lang.Override public java.lang.String getGroup() { java.lang.Object ref = group_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { group_ = s; } return s; } } /** * required string group = 5; * @return The bytes for group. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getGroupBytes() { java.lang.Object ref = group_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); group_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int CREATETIME_FIELD_NUMBER = 6; @SuppressWarnings("serial") private volatile java.lang.Object createTime_ = ""; /** *
     * TODO: do we need access time?
     * 
     *
     * required string createTime = 6;
     * @return Whether the createTime field is set.
     */
    @java.lang.Override
    public boolean hasCreateTime() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * 
     * TODO: do we need access time?
     * 
     *
     * required string createTime = 6;
     * @return The createTime.
     */
    @java.lang.Override
    public java.lang.String getCreateTime() {
      java.lang.Object ref = createTime_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs =
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          createTime_ = s;
        }
        return s;
      }
    }
    /**
     * 
     * TODO: do we need access time?
     * 
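     *
     * (Note on the accessors above: when createTime arrives over the wire it is held
     * as a ByteString; getCreateTime() decodes it once and, if the bytes are valid
     * UTF-8, caches the decoded String back into the field so later calls skip the
     * conversion.)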
* * required string createTime = 6; * @return The bytes for createTime. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getCreateTimeBytes() { java.lang.Object ref = createTime_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); createTime_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSnapshotName()) { memoizedIsInitialized = 0; return false; } if (!hasSnapshotRoot()) { memoizedIsInitialized = 0; return false; } if (!hasPermission()) { memoizedIsInitialized = 0; return false; } if (!hasOwner()) { memoizedIsInitialized = 0; return false; } if (!hasGroup()) { memoizedIsInitialized = 0; return false; } if (!hasCreateTime()) { memoizedIsInitialized = 0; return false; } if (!getPermission().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, snapshotName_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, snapshotRoot_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeMessage(3, getPermission()); } if (((bitField0_ & 0x00000008) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 4, owner_); } if (((bitField0_ & 0x00000010) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, group_); } if (((bitField0_ & 0x00000020) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, createTime_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, snapshotName_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, snapshotRoot_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(3, getPermission()); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(4, owner_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, group_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, createTime_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto other = 
(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto) obj; if (hasSnapshotName() != other.hasSnapshotName()) return false; if (hasSnapshotName()) { if (!getSnapshotName() .equals(other.getSnapshotName())) return false; } if (hasSnapshotRoot() != other.hasSnapshotRoot()) return false; if (hasSnapshotRoot()) { if (!getSnapshotRoot() .equals(other.getSnapshotRoot())) return false; } if (hasPermission() != other.hasPermission()) return false; if (hasPermission()) { if (!getPermission() .equals(other.getPermission())) return false; } if (hasOwner() != other.hasOwner()) return false; if (hasOwner()) { if (!getOwner() .equals(other.getOwner())) return false; } if (hasGroup() != other.hasGroup()) return false; if (hasGroup()) { if (!getGroup() .equals(other.getGroup())) return false; } if (hasCreateTime() != other.hasCreateTime()) return false; if (hasCreateTime()) { if (!getCreateTime() .equals(other.getCreateTime())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSnapshotName()) { hash = (37 * hash) + SNAPSHOTNAME_FIELD_NUMBER; hash = (53 * hash) + getSnapshotName().hashCode(); } if (hasSnapshotRoot()) { hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshotRoot().hashCode(); } if (hasPermission()) { hash = (37 * hash) + PERMISSION_FIELD_NUMBER; hash = (53 * hash) + getPermission().hashCode(); } if (hasOwner()) { hash = (37 * hash) + OWNER_FIELD_NUMBER; hash = (53 * hash) + getOwner().hashCode(); } if (hasGroup()) { hash = (37 * hash) + GROUP_FIELD_NUMBER; hash = (53 * hash) + getGroup().hashCode(); } if (hasCreateTime()) { hash = (37 * hash) + CREATETIME_FIELD_NUMBER; hash = (53 * hash) + getCreateTime().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite 
extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Information related to a snapshot
     * TODO: add more information
     * 
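     *
     * Usage sketch (caller-side code, not part of the generated source; the
     * field values are illustrative and FsPermissionProto is assumed to carry
     * its permission bits in a single setPerm(int) field, as in acl.proto):
     * all six fields of SnapshotInfoProto are required, so build() throws an
     * UninitializedMessageException until every one of them has been set.
     *
     *   HdfsProtos.SnapshotInfoProto info = HdfsProtos.SnapshotInfoProto.newBuilder()
     *       .setSnapshotName("s20240101")
     *       .setSnapshotRoot("/data/warehouse")
     *       .setPermission(AclProtos.FsPermissionProto.newBuilder()
     *           .setPerm(0755)
     *           .build())
     *       .setOwner("hdfs")
     *       .setGroup("supergroup")
     *       .setCreateTime("2024-01-01 00:00:00")
     *       .build();
     *   byte[] wire = info.toByteArray();
     *   HdfsProtos.SnapshotInfoProto parsed = HdfsProtos.SnapshotInfoProto.parseFrom(wire);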
* * Protobuf type {@code hadoop.hdfs.SnapshotInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getPermissionFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; snapshotName_ = ""; snapshotRoot_ = ""; permission_ = null; if (permissionBuilder_ != null) { permissionBuilder_.dispose(); permissionBuilder_ = null; } owner_ = ""; group_ = ""; createTime_ = ""; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.snapshotName_ = snapshotName_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.snapshotRoot_ = snapshotRoot_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.permission_ = permissionBuilder_ == null ? 
permission_ : permissionBuilder_.build(); to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.owner_ = owner_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.group_ = group_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.createTime_ = createTime_; to_bitField0_ |= 0x00000020; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.getDefaultInstance()) return this; if (other.hasSnapshotName()) { snapshotName_ = other.snapshotName_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasSnapshotRoot()) { snapshotRoot_ = other.snapshotRoot_; bitField0_ |= 0x00000002; onChanged(); } if (other.hasPermission()) { mergePermission(other.getPermission()); } if (other.hasOwner()) { owner_ = other.owner_; bitField0_ |= 0x00000008; onChanged(); } if (other.hasGroup()) { group_ = other.group_; bitField0_ |= 0x00000010; onChanged(); } if (other.hasCreateTime()) { createTime_ = other.createTime_; bitField0_ |= 0x00000020; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasSnapshotName()) { return false; } if (!hasSnapshotRoot()) { return false; } if (!hasPermission()) { return false; } if (!hasOwner()) { return false; } if (!hasGroup()) { return false; } if (!hasCreateTime()) { return false; } if (!getPermission().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { snapshotName_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 18: { snapshotRoot_ = input.readBytes(); bitField0_ |= 0x00000002; break; } // case 18 
case 26: { input.readMessage( getPermissionFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000004; break; } // case 26 case 34: { owner_ = input.readBytes(); bitField0_ |= 0x00000008; break; } // case 34 case 42: { group_ = input.readBytes(); bitField0_ |= 0x00000010; break; } // case 42 case 50: { createTime_ = input.readBytes(); bitField0_ |= 0x00000020; break; } // case 50 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object snapshotName_ = ""; /** * required string snapshotName = 1; * @return Whether the snapshotName field is set. */ public boolean hasSnapshotName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string snapshotName = 1; * @return The snapshotName. */ public java.lang.String getSnapshotName() { java.lang.Object ref = snapshotName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string snapshotName = 1; * @return The bytes for snapshotName. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotNameBytes() { java.lang.Object ref = snapshotName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string snapshotName = 1; * @param value The snapshotName to set. * @return This builder for chaining. */ public Builder setSnapshotName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } snapshotName_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required string snapshotName = 1; * @return This builder for chaining. */ public Builder clearSnapshotName() { snapshotName_ = getDefaultInstance().getSnapshotName(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * required string snapshotName = 1; * @param value The bytes for snapshotName to set. * @return This builder for chaining. */ public Builder setSnapshotNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } snapshotName_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private java.lang.Object snapshotRoot_ = ""; /** * required string snapshotRoot = 2; * @return Whether the snapshotRoot field is set. */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000002) != 0); } /** * required string snapshotRoot = 2; * @return The snapshotRoot. */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string snapshotRoot = 2; * @return The bytes for snapshotRoot. 
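       *
       * A minimal sketch of the two accessor flavours (caller-side code, not
       * generated here): the plain setter takes a java.lang.String, while the
       * Bytes variant works on the raw UTF-8 ByteString that protobuf stores
       * internally, avoiding a String round trip.
       *
       *   Builder b = SnapshotInfoProto.newBuilder();
       *   b.setSnapshotRoot("/data/warehouse");
       *   b.setSnapshotRootBytes(org.apache.hadoop.thirdparty.protobuf.ByteString
       *       .copyFromUtf8("/data/warehouse"));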
*/ public org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string snapshotRoot = 2; * @param value The snapshotRoot to set. * @return This builder for chaining. */ public Builder setSnapshotRoot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } snapshotRoot_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required string snapshotRoot = 2; * @return This builder for chaining. */ public Builder clearSnapshotRoot() { snapshotRoot_ = getDefaultInstance().getSnapshotRoot(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } /** * required string snapshotRoot = 2; * @param value The bytes for snapshotRoot to set. * @return This builder for chaining. */ public Builder setSnapshotRootBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } snapshotRoot_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> permissionBuilder_; /** * required .hadoop.hdfs.FsPermissionProto permission = 3; * @return Whether the permission field is set. */ public boolean hasPermission() { return ((bitField0_ & 0x00000004) != 0); } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; * @return The permission. */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() { if (permissionBuilder_ == null) { return permission_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } else { return permissionBuilder_.getMessage(); } } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (permissionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } permission_ = value; } else { permissionBuilder_.setMessage(value); } bitField0_ |= 0x00000004; onChanged(); return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public Builder setPermission( org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) { if (permissionBuilder_ == null) { permission_ = builderForValue.build(); } else { permissionBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; onChanged(); return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (permissionBuilder_ == null) { if (((bitField0_ & 0x00000004) != 0) && permission_ != null && permission_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) { getPermissionBuilder().mergeFrom(value); } else { permission_ = value; } } else { permissionBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; onChanged(); return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public Builder clearPermission() { bitField0_ = (bitField0_ & ~0x00000004); permission_ = null; if (permissionBuilder_ != null) { permissionBuilder_.dispose(); permissionBuilder_ = null; } onChanged(); return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getPermissionBuilder() { bitField0_ |= 0x00000004; onChanged(); return getPermissionFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { if (permissionBuilder_ != null) { return permissionBuilder_.getMessageOrBuilder(); } else { return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> getPermissionFieldBuilder() { if (permissionBuilder_ == null) { permissionBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>( getPermission(), getParentForChildren(), isClean()); permission_ = null; } return permissionBuilder_; } private java.lang.Object owner_ = ""; /** * required string owner = 4; * @return Whether the owner field is set. */ public boolean hasOwner() { return ((bitField0_ & 0x00000008) != 0); } /** * required string owner = 4; * @return The owner. 
*/ public java.lang.String getOwner() { java.lang.Object ref = owner_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { owner_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string owner = 4; * @return The bytes for owner. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string owner = 4; * @param value The owner to set. * @return This builder for chaining. */ public Builder setOwner( java.lang.String value) { if (value == null) { throw new NullPointerException(); } owner_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * required string owner = 4; * @return This builder for chaining. */ public Builder clearOwner() { owner_ = getDefaultInstance().getOwner(); bitField0_ = (bitField0_ & ~0x00000008); onChanged(); return this; } /** * required string owner = 4; * @param value The bytes for owner to set. * @return This builder for chaining. */ public Builder setOwnerBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } owner_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } private java.lang.Object group_ = ""; /** * required string group = 5; * @return Whether the group field is set. */ public boolean hasGroup() { return ((bitField0_ & 0x00000010) != 0); } /** * required string group = 5; * @return The group. */ public java.lang.String getGroup() { java.lang.Object ref = group_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { group_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string group = 5; * @return The bytes for group. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getGroupBytes() { java.lang.Object ref = group_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); group_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string group = 5; * @param value The group to set. * @return This builder for chaining. */ public Builder setGroup( java.lang.String value) { if (value == null) { throw new NullPointerException(); } group_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * required string group = 5; * @return This builder for chaining. */ public Builder clearGroup() { group_ = getDefaultInstance().getGroup(); bitField0_ = (bitField0_ & ~0x00000010); onChanged(); return this; } /** * required string group = 5; * @param value The bytes for group to set. * @return This builder for chaining. */ public Builder setGroupBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } group_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } private java.lang.Object createTime_ = ""; /** *
       * TODO: do we need access time?
       * 
       *
       * required string createTime = 6;
       * @return Whether the createTime field is set.
       */
      public boolean hasCreateTime() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * TODO: do we need access time?
       * 
       *
       * required string createTime = 6;
       * @return The createTime.
       */
      public java.lang.String getCreateTime() {
        java.lang.Object ref = createTime_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            createTime_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * TODO: do we need access time?
       * 
       *
       * required string createTime = 6;
       * @return The bytes for createTime.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getCreateTimeBytes() {
        java.lang.Object ref = createTime_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b =
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          createTime_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * TODO: do we need access time?
       * 
       *
       * required string createTime = 6;
       * @param value The createTime to set.
       * @return This builder for chaining.
       */
      public Builder setCreateTime(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        createTime_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * TODO: do we need access time?
       * 
       *
       * required string createTime = 6;
       * @return This builder for chaining.
       */
      public Builder clearCreateTime() {
        createTime_ = getDefaultInstance().getCreateTime();
        bitField0_ = (bitField0_ & ~0x00000020);
        onChanged();
        return this;
      }
      /**
       * TODO: do we need access time?
       * 
* * required string createTime = 6; * @param value The bytes for createTime to set. * @return This builder for chaining. */ public Builder setCreateTimeBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } createTime_ = value; bitField0_ |= 0x00000020; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface RollingUpgradeStatusProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.RollingUpgradeStatusProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string blockPoolId = 1; * @return Whether the blockPoolId field is set. */ boolean hasBlockPoolId(); /** * required string blockPoolId = 1; * @return The blockPoolId. */ java.lang.String getBlockPoolId(); /** * required string blockPoolId = 1; * @return The bytes for blockPoolId. */ org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes(); /** * optional bool finalized = 2 [default = false]; * @return Whether the finalized field is set. */ boolean hasFinalized(); /** * optional bool finalized = 2 [default = false]; * @return The finalized. */ boolean getFinalized(); } /** *
   **
   * Rolling upgrade status
   * 
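    *
    * Usage sketch (caller-side code with an illustrative block pool id, not
    * part of the generated source): blockPoolId is the only required field,
    * while finalized is optional and defaults to false when unset.
    *
    *   HdfsProtos.RollingUpgradeStatusProto status =
    *       HdfsProtos.RollingUpgradeStatusProto.newBuilder()
    *           .setBlockPoolId("BP-1234567890-192.168.1.1-1700000000000")
    *           .build();
    *   boolean done = status.getFinalized();   // false until setFinalized(true)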
* * Protobuf type {@code hadoop.hdfs.RollingUpgradeStatusProto} */ public static final class RollingUpgradeStatusProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.RollingUpgradeStatusProto) RollingUpgradeStatusProtoOrBuilder { private static final long serialVersionUID = 0L; // Use RollingUpgradeStatusProto.newBuilder() to construct. private RollingUpgradeStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private RollingUpgradeStatusProto() { blockPoolId_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new RollingUpgradeStatusProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder.class); } private int bitField0_; public static final int BLOCKPOOLID_FIELD_NUMBER = 1; @SuppressWarnings("serial") private volatile java.lang.Object blockPoolId_ = ""; /** * required string blockPoolId = 1; * @return Whether the blockPoolId field is set. */ @java.lang.Override public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000001) != 0); } /** * required string blockPoolId = 1; * @return The blockPoolId. */ @java.lang.Override public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } } /** * required string blockPoolId = 1; * @return The bytes for blockPoolId. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int FINALIZED_FIELD_NUMBER = 2; private boolean finalized_ = false; /** * optional bool finalized = 2 [default = false]; * @return Whether the finalized field is set. */ @java.lang.Override public boolean hasFinalized() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bool finalized = 2 [default = false]; * @return The finalized. 
*/ @java.lang.Override public boolean getFinalized() { return finalized_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasBlockPoolId()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, blockPoolId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBool(2, finalized_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, blockPoolId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(2, finalized_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto) obj; if (hasBlockPoolId() != other.hasBlockPoolId()) return false; if (hasBlockPoolId()) { if (!getBlockPoolId() .equals(other.getBlockPoolId())) return false; } if (hasFinalized() != other.hasFinalized()) return false; if (hasFinalized()) { if (getFinalized() != other.getFinalized()) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasBlockPoolId()) { hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; hash = (53 * hash) + getBlockPoolId().hashCode(); } if (hasFinalized()) { hash = (37 * hash) + FINALIZED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getFinalized()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Rolling upgrade status
     * 
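      *
      * Round-trip sketch (assumed caller-side code; IOException handling
      * omitted): writeDelimitedTo prefixes the message bytes with a varint
      * length, and parseDelimitedFrom reads exactly one such record back, so
      * several status messages can share a single stream.
      *
      *   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
      *   HdfsProtos.RollingUpgradeStatusProto.newBuilder()
      *       .setBlockPoolId("BP-1")
      *       .setFinalized(true)
      *       .build()
      *       .writeDelimitedTo(out);
      *   HdfsProtos.RollingUpgradeStatusProto back =
      *       HdfsProtos.RollingUpgradeStatusProto.parseDelimitedFrom(
      *           new java.io.ByteArrayInputStream(out.toByteArray()));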
* * Protobuf type {@code hadoop.hdfs.RollingUpgradeStatusProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.RollingUpgradeStatusProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; blockPoolId_ = ""; finalized_ = false; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.blockPoolId_ = blockPoolId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.finalized_ = finalized_; to_bitField0_ |= 0x00000002; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return 
super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance()) return this; if (other.hasBlockPoolId()) { blockPoolId_ = other.blockPoolId_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasFinalized()) { setFinalized(other.getFinalized()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasBlockPoolId()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { blockPoolId_ = input.readBytes(); bitField0_ |= 0x00000001; break; } // case 10 case 16: { finalized_ = input.readBool(); bitField0_ |= 0x00000002; break; } // case 16 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object blockPoolId_ = ""; /** * required string blockPoolId = 1; * @return Whether the blockPoolId field is set. */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000001) != 0); } /** * required string blockPoolId = 1; * @return The blockPoolId. */ public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string blockPoolId = 1; * @return The bytes for blockPoolId. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string blockPoolId = 1; * @param value The blockPoolId to set. 
* @return This builder for chaining. */ public Builder setBlockPoolId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } blockPoolId_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * required string blockPoolId = 1; * @return This builder for chaining. */ public Builder clearBlockPoolId() { blockPoolId_ = getDefaultInstance().getBlockPoolId(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * required string blockPoolId = 1; * @param value The bytes for blockPoolId to set. * @return This builder for chaining. */ public Builder setBlockPoolIdBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } blockPoolId_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private boolean finalized_ ; /** * optional bool finalized = 2 [default = false]; * @return Whether the finalized field is set. */ @java.lang.Override public boolean hasFinalized() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bool finalized = 2 [default = false]; * @return The finalized. */ @java.lang.Override public boolean getFinalized() { return finalized_; } /** * optional bool finalized = 2 [default = false]; * @param value The finalized to set. * @return This builder for chaining. */ public Builder setFinalized(boolean value) { finalized_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * optional bool finalized = 2 [default = false]; * @return This builder for chaining. */ public Builder clearFinalized() { bitField0_ = (bitField0_ & ~0x00000002); finalized_ = false; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RollingUpgradeStatusProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RollingUpgradeStatusProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public RollingUpgradeStatusProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) 
.setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface StorageUuidsProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageUuidsProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated string storageUuids = 1; * @return A list containing the storageUuids. */ java.util.List getStorageUuidsList(); /** * repeated string storageUuids = 1; * @return The count of storageUuids. */ int getStorageUuidsCount(); /** * repeated string storageUuids = 1; * @param index The index of the element to return. * @return The storageUuids at the given index. */ java.lang.String getStorageUuids(int index); /** * repeated string storageUuids = 1; * @param index The index of the value to return. * @return The bytes of the storageUuids at the given index. */ org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidsBytes(int index); } /** *
   **
   * A list of storage IDs.
   * 
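    *
    * Usage sketch (caller-side code with illustrative storage ids; the
    * addStorageUuids/addAllStorageUuids builder methods are the standard
    * protoc output for a repeated string field): the message exposes the
    * values as an immutable list view.
    *
    *   HdfsProtos.StorageUuidsProto uuids = HdfsProtos.StorageUuidsProto.newBuilder()
    *       .addStorageUuids("DS-1b6ba21c-0001")
    *       .addAllStorageUuids(java.util.Arrays.asList(
    *           "DS-1b6ba21c-0002", "DS-1b6ba21c-0003"))
    *       .build();
    *   for (java.lang.String id : uuids.getStorageUuidsList()) {
    *     // each entry is one storage UUID reported for a DataNode volume
    *   }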
* * Protobuf type {@code hadoop.hdfs.StorageUuidsProto} */ public static final class StorageUuidsProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageUuidsProto) StorageUuidsProtoOrBuilder { private static final long serialVersionUID = 0L; // Use StorageUuidsProto.newBuilder() to construct. private StorageUuidsProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private StorageUuidsProto() { storageUuids_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new StorageUuidsProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder.class); } public static final int STORAGEUUIDS_FIELD_NUMBER = 1; @SuppressWarnings("serial") private org.apache.hadoop.thirdparty.protobuf.LazyStringList storageUuids_; /** * repeated string storageUuids = 1; * @return A list containing the storageUuids. */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getStorageUuidsList() { return storageUuids_; } /** * repeated string storageUuids = 1; * @return The count of storageUuids. */ public int getStorageUuidsCount() { return storageUuids_.size(); } /** * repeated string storageUuids = 1; * @param index The index of the element to return. * @return The storageUuids at the given index. */ public java.lang.String getStorageUuids(int index) { return storageUuids_.get(index); } /** * repeated string storageUuids = 1; * @param index The index of the value to return. * @return The bytes of the storageUuids at the given index. 
*/ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidsBytes(int index) { return storageUuids_.getByteString(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < storageUuids_.size(); i++) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, storageUuids_.getRaw(i)); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; { int dataSize = 0; for (int i = 0; i < storageUuids_.size(); i++) { dataSize += computeStringSizeNoTag(storageUuids_.getRaw(i)); } size += dataSize; size += 1 * getStorageUuidsList().size(); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) obj; if (!getStorageUuidsList() .equals(other.getStorageUuidsList())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getStorageUuidsCount() > 0) { hash = (37 * hash) + STORAGEUUIDS_FIELD_NUMBER; hash = (53 * hash) + getStorageUuidsList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( byte[] data, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * A list of storage IDs.
     * 
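      *
      * A minimal usage sketch (illustrative only; the storage UUID values are
      * made up), built with the generated builder that follows:
      *
      *   StorageUuidsProto uuids = StorageUuidsProto.newBuilder()
      *       .addStorageUuids("DS-1b2c3d4e")
      *       .addStorageUuids("DS-5f6a7b8c")
      *       .build();
      *   int count = uuids.getStorageUuidsCount();   // 2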
* * Protobuf type {@code hadoop.hdfs.StorageUuidsProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageUuidsProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; storageUuids_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto result) { if (((bitField0_ & 0x00000001) != 0)) { storageUuids_ = storageUuids_.getUnmodifiableView(); bitField0_ = (bitField0_ & ~0x00000001); } result.storageUuids_ = storageUuids_; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto result) { int from_bitField0_ = bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return 
super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.getDefaultInstance()) return this; if (!other.storageUuids_.isEmpty()) { if (storageUuids_.isEmpty()) { storageUuids_ = other.storageUuids_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureStorageUuidsIsMutable(); storageUuids_.addAll(other.storageUuids_); } onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); ensureStorageUuidsIsMutable(); storageUuids_.add(bs); break; } // case 10 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.LazyStringList storageUuids_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; private void ensureStorageUuidsIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { storageUuids_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(storageUuids_); bitField0_ |= 0x00000001; } } /** * repeated string storageUuids = 1; * @return A list containing the storageUuids. */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getStorageUuidsList() { return storageUuids_.getUnmodifiableView(); } /** * repeated string storageUuids = 1; * @return The count of storageUuids. */ public int getStorageUuidsCount() { return storageUuids_.size(); } /** * repeated string storageUuids = 1; * @param index The index of the element to return. * @return The storageUuids at the given index. */ public java.lang.String getStorageUuids(int index) { return storageUuids_.get(index); } /** * repeated string storageUuids = 1; * @param index The index of the value to return. * @return The bytes of the storageUuids at the given index. 
*/ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidsBytes(int index) { return storageUuids_.getByteString(index); } /** * repeated string storageUuids = 1; * @param index The index to set the value at. * @param value The storageUuids to set. * @return This builder for chaining. */ public Builder setStorageUuids( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageUuidsIsMutable(); storageUuids_.set(index, value); onChanged(); return this; } /** * repeated string storageUuids = 1; * @param value The storageUuids to add. * @return This builder for chaining. */ public Builder addStorageUuids( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageUuidsIsMutable(); storageUuids_.add(value); onChanged(); return this; } /** * repeated string storageUuids = 1; * @param values The storageUuids to add. * @return This builder for chaining. */ public Builder addAllStorageUuids( java.lang.Iterable values) { ensureStorageUuidsIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, storageUuids_); onChanged(); return this; } /** * repeated string storageUuids = 1; * @return This builder for chaining. */ public Builder clearStorageUuids() { storageUuids_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * repeated string storageUuids = 1; * @param value The bytes of the storageUuids to add. * @return This builder for chaining. */ public Builder addStorageUuidsBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureStorageUuidsIsMutable(); storageUuids_.add(value); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageUuidsProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageUuidsProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public StorageUuidsProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface BlockTokenSecretProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockTokenSecretProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint64 expiryDate = 1; * @return Whether the expiryDate field is set. */ boolean hasExpiryDate(); /** * optional uint64 expiryDate = 1; * @return The expiryDate. */ long getExpiryDate(); /** * optional uint32 keyId = 2; * @return Whether the keyId field is set. */ boolean hasKeyId(); /** * optional uint32 keyId = 2; * @return The keyId. */ int getKeyId(); /** * optional string userId = 3; * @return Whether the userId field is set. */ boolean hasUserId(); /** * optional string userId = 3; * @return The userId. */ java.lang.String getUserId(); /** * optional string userId = 3; * @return The bytes for userId. */ org.apache.hadoop.thirdparty.protobuf.ByteString getUserIdBytes(); /** * optional string blockPoolId = 4; * @return Whether the blockPoolId field is set. */ boolean hasBlockPoolId(); /** * optional string blockPoolId = 4; * @return The blockPoolId. */ java.lang.String getBlockPoolId(); /** * optional string blockPoolId = 4; * @return The bytes for blockPoolId. */ org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes(); /** * optional uint64 blockId = 5; * @return Whether the blockId field is set. */ boolean hasBlockId(); /** * optional uint64 blockId = 5; * @return The blockId. */ long getBlockId(); /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; * @return A list containing the modes. */ java.util.List getModesList(); /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; * @return The count of modes. */ int getModesCount(); /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; * @param index The index of the element to return. * @return The modes at the given index. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto getModes(int index); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return A list containing the storageTypes. */ java.util.List getStorageTypesList(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return The count of storageTypes. */ int getStorageTypesCount(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @param index The index of the element to return. * @return The storageTypes at the given index. */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index); /** * repeated string storageIds = 8; * @return A list containing the storageIds. */ java.util.List getStorageIdsList(); /** * repeated string storageIds = 8; * @return The count of storageIds. */ int getStorageIdsCount(); /** * repeated string storageIds = 8; * @param index The index of the element to return. * @return The storageIds at the given index. */ java.lang.String getStorageIds(int index); /** * repeated string storageIds = 8; * @param index The index of the value to return. * @return The bytes of the storageIds at the given index. 
*/ org.apache.hadoop.thirdparty.protobuf.ByteString getStorageIdsBytes(int index); /** * optional bytes handshakeSecret = 9; * @return Whether the handshakeSecret field is set. */ boolean hasHandshakeSecret(); /** * optional bytes handshakeSecret = 9; * @return The handshakeSecret. */ org.apache.hadoop.thirdparty.protobuf.ByteString getHandshakeSecret(); } /** *
   **
   * Secret information for the BlockKeyProto. This is not sent on the wire as
    * such, but is used to pack a byte array that is then encrypted and put in
    * BlockKeyProto.bytes.
   * When adding further fields, make sure they are optional as they would
   * otherwise not be backwards compatible.
   * Note: As part of the migration from WritableUtils based tokens (aka "legacy")
   * to Protocol Buffers, we use the first byte to determine the type. If the
   * first byte is <=0 then it is a legacy token. This means that when using
    * protobuf tokens, the first field sent must have a `field_number` less
   * than 16 to make sure that the first byte is positive. Otherwise it could be
   * parsed as a legacy token. See HDFS-11026 for more discussion.
   * 
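    *
    * Illustrative sketch (values made up): with field numbers below 16 the
    * leading tag byte is at most 0x7f, so it is always positive. For example,
    * when expiryDate (field 1, a varint) is set, the serialized token starts
    * with the tag byte 0x08:
    *
    *   BlockTokenSecretProto token = BlockTokenSecretProto.newBuilder()
    *       .setExpiryDate(1700000000000L)
    *       .setKeyId(42)
    *       .setUserId("hdfs")
    *       .setBlockPoolId("BP-1234-example")
    *       .setBlockId(1073741825L)
    *       .build();
    *   byte[] bytes = token.toByteArray();
    *   // bytes[0] == 0x08 (field number 1, wire type 0), i.e. positive,
    *   // so it cannot be mistaken for a legacy token whose first byte is <=0.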
* * Protobuf type {@code hadoop.hdfs.BlockTokenSecretProto} */ public static final class BlockTokenSecretProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockTokenSecretProto) BlockTokenSecretProtoOrBuilder { private static final long serialVersionUID = 0L; // Use BlockTokenSecretProto.newBuilder() to construct. private BlockTokenSecretProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private BlockTokenSecretProto() { userId_ = ""; blockPoolId_ = ""; modes_ = java.util.Collections.emptyList(); storageTypes_ = java.util.Collections.emptyList(); storageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; handshakeSecret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new BlockTokenSecretProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.Builder.class); } private int bitField0_; public static final int EXPIRYDATE_FIELD_NUMBER = 1; private long expiryDate_ = 0L; /** * optional uint64 expiryDate = 1; * @return Whether the expiryDate field is set. */ @java.lang.Override public boolean hasExpiryDate() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 expiryDate = 1; * @return The expiryDate. */ @java.lang.Override public long getExpiryDate() { return expiryDate_; } public static final int KEYID_FIELD_NUMBER = 2; private int keyId_ = 0; /** * optional uint32 keyId = 2; * @return Whether the keyId field is set. */ @java.lang.Override public boolean hasKeyId() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint32 keyId = 2; * @return The keyId. */ @java.lang.Override public int getKeyId() { return keyId_; } public static final int USERID_FIELD_NUMBER = 3; @SuppressWarnings("serial") private volatile java.lang.Object userId_ = ""; /** * optional string userId = 3; * @return Whether the userId field is set. */ @java.lang.Override public boolean hasUserId() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string userId = 3; * @return The userId. */ @java.lang.Override public java.lang.String getUserId() { java.lang.Object ref = userId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { userId_ = s; } return s; } } /** * optional string userId = 3; * @return The bytes for userId. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getUserIdBytes() { java.lang.Object ref = userId_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); userId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int BLOCKPOOLID_FIELD_NUMBER = 4; @SuppressWarnings("serial") private volatile java.lang.Object blockPoolId_ = ""; /** * optional string blockPoolId = 4; * @return Whether the blockPoolId field is set. */ @java.lang.Override public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000008) != 0); } /** * optional string blockPoolId = 4; * @return The blockPoolId. */ @java.lang.Override public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } } /** * optional string blockPoolId = 4; * @return The bytes for blockPoolId. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int BLOCKID_FIELD_NUMBER = 5; private long blockId_ = 0L; /** * optional uint64 blockId = 5; * @return Whether the blockId field is set. */ @java.lang.Override public boolean hasBlockId() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 blockId = 5; * @return The blockId. */ @java.lang.Override public long getBlockId() { return blockId_; } public static final int MODES_FIELD_NUMBER = 6; @SuppressWarnings("serial") private java.util.List modes_; private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto> modes_converter_ = new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto>() { public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto convert(java.lang.Integer from) { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto.forNumber(from); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto.READ : result; } }; /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; * @return A list containing the modes. */ @java.lang.Override public java.util.List getModesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto>(modes_, modes_converter_); } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; * @return The count of modes. */ @java.lang.Override public int getModesCount() { return modes_.size(); } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; * @param index The index of the element to return. * @return The modes at the given index. 
*/ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto getModes(int index) { return modes_converter_.convert(modes_.get(index)); } public static final int STORAGETYPES_FIELD_NUMBER = 7; @SuppressWarnings("serial") private java.util.List storageTypes_; private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_converter_ = new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() { public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(from); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } }; /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return A list containing the storageTypes. */ @java.lang.Override public java.util.List getStorageTypesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return The count of storageTypes. */ @java.lang.Override public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @param index The index of the element to return. * @return The storageTypes at the given index. */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_converter_.convert(storageTypes_.get(index)); } public static final int STORAGEIDS_FIELD_NUMBER = 8; @SuppressWarnings("serial") private org.apache.hadoop.thirdparty.protobuf.LazyStringList storageIds_; /** * repeated string storageIds = 8; * @return A list containing the storageIds. */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getStorageIdsList() { return storageIds_; } /** * repeated string storageIds = 8; * @return The count of storageIds. */ public int getStorageIdsCount() { return storageIds_.size(); } /** * repeated string storageIds = 8; * @param index The index of the element to return. * @return The storageIds at the given index. */ public java.lang.String getStorageIds(int index) { return storageIds_.get(index); } /** * repeated string storageIds = 8; * @param index The index of the value to return. * @return The bytes of the storageIds at the given index. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageIdsBytes(int index) { return storageIds_.getByteString(index); } public static final int HANDSHAKESECRET_FIELD_NUMBER = 9; private org.apache.hadoop.thirdparty.protobuf.ByteString handshakeSecret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes handshakeSecret = 9; * @return Whether the handshakeSecret field is set. */ @java.lang.Override public boolean hasHandshakeSecret() { return ((bitField0_ & 0x00000020) != 0); } /** * optional bytes handshakeSecret = 9; * @return The handshakeSecret. 
*/ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getHandshakeSecret() { return handshakeSecret_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, expiryDate_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, keyId_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, userId_); } if (((bitField0_ & 0x00000008) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 4, blockPoolId_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, blockId_); } for (int i = 0; i < modes_.size(); i++) { output.writeEnum(6, modes_.get(i)); } for (int i = 0; i < storageTypes_.size(); i++) { output.writeEnum(7, storageTypes_.get(i)); } for (int i = 0; i < storageIds_.size(); i++) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 8, storageIds_.getRaw(i)); } if (((bitField0_ & 0x00000020) != 0)) { output.writeBytes(9, handshakeSecret_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, expiryDate_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, keyId_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, userId_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(4, blockPoolId_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, blockId_); } { int dataSize = 0; for (int i = 0; i < modes_.size(); i++) { dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSizeNoTag(modes_.get(i)); } size += dataSize; size += 1 * modes_.size(); } { int dataSize = 0; for (int i = 0; i < storageTypes_.size(); i++) { dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSizeNoTag(storageTypes_.get(i)); } size += dataSize; size += 1 * storageTypes_.size(); } { int dataSize = 0; for (int i = 0; i < storageIds_.size(); i++) { dataSize += computeStringSizeNoTag(storageIds_.getRaw(i)); } size += dataSize; size += 1 * getStorageIdsList().size(); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(9, handshakeSecret_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto) obj; if 
(hasExpiryDate() != other.hasExpiryDate()) return false; if (hasExpiryDate()) { if (getExpiryDate() != other.getExpiryDate()) return false; } if (hasKeyId() != other.hasKeyId()) return false; if (hasKeyId()) { if (getKeyId() != other.getKeyId()) return false; } if (hasUserId() != other.hasUserId()) return false; if (hasUserId()) { if (!getUserId() .equals(other.getUserId())) return false; } if (hasBlockPoolId() != other.hasBlockPoolId()) return false; if (hasBlockPoolId()) { if (!getBlockPoolId() .equals(other.getBlockPoolId())) return false; } if (hasBlockId() != other.hasBlockId()) return false; if (hasBlockId()) { if (getBlockId() != other.getBlockId()) return false; } if (!modes_.equals(other.modes_)) return false; if (!storageTypes_.equals(other.storageTypes_)) return false; if (!getStorageIdsList() .equals(other.getStorageIdsList())) return false; if (hasHandshakeSecret() != other.hasHandshakeSecret()) return false; if (hasHandshakeSecret()) { if (!getHandshakeSecret() .equals(other.getHandshakeSecret())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasExpiryDate()) { hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getExpiryDate()); } if (hasKeyId()) { hash = (37 * hash) + KEYID_FIELD_NUMBER; hash = (53 * hash) + getKeyId(); } if (hasUserId()) { hash = (37 * hash) + USERID_FIELD_NUMBER; hash = (53 * hash) + getUserId().hashCode(); } if (hasBlockPoolId()) { hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; hash = (53 * hash) + getBlockPoolId().hashCode(); } if (hasBlockId()) { hash = (37 * hash) + BLOCKID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getBlockId()); } if (getModesCount() > 0) { hash = (37 * hash) + MODES_FIELD_NUMBER; hash = (53 * hash) + modes_.hashCode(); } if (getStorageTypesCount() > 0) { hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER; hash = (53 * hash) + storageTypes_.hashCode(); } if (getStorageIdsCount() > 0) { hash = (37 * hash) + STORAGEIDS_FIELD_NUMBER; hash = (53 * hash) + getStorageIdsList().hashCode(); } if (hasHandshakeSecret()) { hash = (37 * hash) + HANDSHAKESECRET_FIELD_NUMBER; hash = (53 * hash) + getHandshakeSecret().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Secret information for the BlockKeyProto. This is not sent on the wire as
      * such, but is used to pack a byte array that is then encrypted and put in
      * BlockKeyProto.bytes.
     * When adding further fields, make sure they are optional as they would
     * otherwise not be backwards compatible.
     * Note: As part of the migration from WritableUtils based tokens (aka "legacy")
     * to Protocol Buffers, we use the first byte to determine the type. If the
     * first byte is <=0 then it is a legacy token. This means that when using
      * protobuf tokens, the first field sent must have a `field_number` less
     * than 16 to make sure that the first byte is positive. Otherwise it could be
     * parsed as a legacy token. See HDFS-11026 for more discussion.
     * 
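      *
      * A minimal round-trip sketch (illustrative values) using the builder and
      * parser generated below, assuming the AccessModeProto and
      * StorageTypeProto enum constants referenced elsewhere in this file:
      *
      *   BlockTokenSecretProto secret = BlockTokenSecretProto.newBuilder()
      *       .setExpiryDate(1700000000000L)
      *       .setUserId("hdfs")
      *       .addModes(AccessModeProto.READ)
      *       .addStorageTypes(StorageTypeProto.DISK)
      *       .addStorageIds("DS-1b2c3d4e")
      *       .build();
      *   BlockTokenSecretProto parsed =
      *       BlockTokenSecretProto.parseFrom(secret.toByteArray());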
* * Protobuf type {@code hadoop.hdfs.BlockTokenSecretProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockTokenSecretProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; expiryDate_ = 0L; keyId_ = 0; userId_ = ""; blockPoolId_ = ""; blockId_ = 0L; modes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000020); storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000040); storageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000080); handshakeSecret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto result) { if (((bitField0_ & 0x00000020) != 0)) { modes_ = java.util.Collections.unmodifiableList(modes_); bitField0_ = (bitField0_ & ~0x00000020); } result.modes_ = modes_; if (((bitField0_ & 0x00000040) != 0)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); bitField0_ = (bitField0_ & ~0x00000040); } result.storageTypes_ = storageTypes_; if (((bitField0_ & 0x00000080) != 0)) { storageIds_ = storageIds_.getUnmodifiableView(); bitField0_ = 
(bitField0_ & ~0x00000080); } result.storageIds_ = storageIds_; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.expiryDate_ = expiryDate_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.keyId_ = keyId_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.userId_ = userId_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.blockPoolId_ = blockPoolId_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.blockId_ = blockId_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000100) != 0)) { result.handshakeSecret_ = handshakeSecret_; to_bitField0_ |= 0x00000020; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.getDefaultInstance()) return this; if (other.hasExpiryDate()) { setExpiryDate(other.getExpiryDate()); } if (other.hasKeyId()) { setKeyId(other.getKeyId()); } if (other.hasUserId()) { userId_ = other.userId_; bitField0_ |= 0x00000004; onChanged(); } if (other.hasBlockPoolId()) { blockPoolId_ = other.blockPoolId_; bitField0_ |= 0x00000008; onChanged(); } if (other.hasBlockId()) { setBlockId(other.getBlockId()); } if (!other.modes_.isEmpty()) { if (modes_.isEmpty()) { modes_ = other.modes_; bitField0_ = (bitField0_ & ~0x00000020); } else { ensureModesIsMutable(); modes_.addAll(other.modes_); } onChanged(); } if (!other.storageTypes_.isEmpty()) { if (storageTypes_.isEmpty()) { storageTypes_ = other.storageTypes_; bitField0_ = (bitField0_ & ~0x00000040); } else { ensureStorageTypesIsMutable(); storageTypes_.addAll(other.storageTypes_); } onChanged(); } if (!other.storageIds_.isEmpty()) { if (storageIds_.isEmpty()) { storageIds_ = other.storageIds_; bitField0_ = (bitField0_ & ~0x00000080); } else { ensureStorageIdsIsMutable(); storageIds_.addAll(other.storageIds_); } onChanged(); } if (other.hasHandshakeSecret()) { 
setHandshakeSecret(other.getHandshakeSecret()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { expiryDate_ = input.readUInt64(); bitField0_ |= 0x00000001; break; } // case 8 case 16: { keyId_ = input.readUInt32(); bitField0_ |= 0x00000002; break; } // case 16 case 26: { userId_ = input.readBytes(); bitField0_ |= 0x00000004; break; } // case 26 case 34: { blockPoolId_ = input.readBytes(); bitField0_ |= 0x00000008; break; } // case 34 case 40: { blockId_ = input.readUInt64(); bitField0_ |= 0x00000010; break; } // case 40 case 48: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(6, tmpRaw); } else { ensureModesIsMutable(); modes_.add(tmpRaw); } break; } // case 48 case 50: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(6, tmpRaw); } else { ensureModesIsMutable(); modes_.add(tmpRaw); } } input.popLimit(oldLimit); break; } // case 50 case 56: { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(7, tmpRaw); } else { ensureStorageTypesIsMutable(); storageTypes_.add(tmpRaw); } break; } // case 56 case 58: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int tmpRaw = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw); if (tmpValue == null) { mergeUnknownVarintField(7, tmpRaw); } else { ensureStorageTypesIsMutable(); storageTypes_.add(tmpRaw); } } input.popLimit(oldLimit); break; } // case 58 case 66: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); ensureStorageIdsIsMutable(); storageIds_.add(bs); break; } // case 66 case 74: { handshakeSecret_ = input.readBytes(); bitField0_ |= 0x00000100; break; } // case 74 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private long expiryDate_ ; /** * optional uint64 expiryDate = 1; * @return Whether the expiryDate field is set. 
*/ @java.lang.Override public boolean hasExpiryDate() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 expiryDate = 1; * @return The expiryDate. */ @java.lang.Override public long getExpiryDate() { return expiryDate_; } /** * optional uint64 expiryDate = 1; * @param value The expiryDate to set. * @return This builder for chaining. */ public Builder setExpiryDate(long value) { expiryDate_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * optional uint64 expiryDate = 1; * @return This builder for chaining. */ public Builder clearExpiryDate() { bitField0_ = (bitField0_ & ~0x00000001); expiryDate_ = 0L; onChanged(); return this; } private int keyId_ ; /** * optional uint32 keyId = 2; * @return Whether the keyId field is set. */ @java.lang.Override public boolean hasKeyId() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint32 keyId = 2; * @return The keyId. */ @java.lang.Override public int getKeyId() { return keyId_; } /** * optional uint32 keyId = 2; * @param value The keyId to set. * @return This builder for chaining. */ public Builder setKeyId(int value) { keyId_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * optional uint32 keyId = 2; * @return This builder for chaining. */ public Builder clearKeyId() { bitField0_ = (bitField0_ & ~0x00000002); keyId_ = 0; onChanged(); return this; } private java.lang.Object userId_ = ""; /** * optional string userId = 3; * @return Whether the userId field is set. */ public boolean hasUserId() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string userId = 3; * @return The userId. */ public java.lang.String getUserId() { java.lang.Object ref = userId_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { userId_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string userId = 3; * @return The bytes for userId. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getUserIdBytes() { java.lang.Object ref = userId_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); userId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string userId = 3; * @param value The userId to set. * @return This builder for chaining. */ public Builder setUserId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } userId_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * optional string userId = 3; * @return This builder for chaining. */ public Builder clearUserId() { userId_ = getDefaultInstance().getUserId(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } /** * optional string userId = 3; * @param value The bytes for userId to set. * @return This builder for chaining. */ public Builder setUserIdBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } userId_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } private java.lang.Object blockPoolId_ = ""; /** * optional string blockPoolId = 4; * @return Whether the blockPoolId field is set. */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000008) != 0); } /** * optional string blockPoolId = 4; * @return The blockPoolId. 
*/ public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string blockPoolId = 4; * @return The bytes for blockPoolId. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string blockPoolId = 4; * @param value The blockPoolId to set. * @return This builder for chaining. */ public Builder setBlockPoolId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } blockPoolId_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * optional string blockPoolId = 4; * @return This builder for chaining. */ public Builder clearBlockPoolId() { blockPoolId_ = getDefaultInstance().getBlockPoolId(); bitField0_ = (bitField0_ & ~0x00000008); onChanged(); return this; } /** * optional string blockPoolId = 4; * @param value The bytes for blockPoolId to set. * @return This builder for chaining. */ public Builder setBlockPoolIdBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } blockPoolId_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } private long blockId_ ; /** * optional uint64 blockId = 5; * @return Whether the blockId field is set. */ @java.lang.Override public boolean hasBlockId() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 blockId = 5; * @return The blockId. */ @java.lang.Override public long getBlockId() { return blockId_; } /** * optional uint64 blockId = 5; * @param value The blockId to set. * @return This builder for chaining. */ public Builder setBlockId(long value) { blockId_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * optional uint64 blockId = 5; * @return This builder for chaining. */ public Builder clearBlockId() { bitField0_ = (bitField0_ & ~0x00000010); blockId_ = 0L; onChanged(); return this; } private java.util.List modes_ = java.util.Collections.emptyList(); private void ensureModesIsMutable() { if (!((bitField0_ & 0x00000020) != 0)) { modes_ = new java.util.ArrayList(modes_); bitField0_ |= 0x00000020; } } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; * @return A list containing the modes. */ public java.util.List getModesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto>(modes_, modes_converter_); } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; * @return The count of modes. */ public int getModesCount() { return modes_.size(); } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; * @param index The index of the element to return. * @return The modes at the given index. 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto getModes(int index) { return modes_converter_.convert(modes_.get(index)); } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; * @param index The index to set the value at. * @param value The modes to set. * @return This builder for chaining. */ public Builder setModes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto value) { if (value == null) { throw new NullPointerException(); } ensureModesIsMutable(); modes_.set(index, value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; * @param value The modes to add. * @return This builder for chaining. */ public Builder addModes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto value) { if (value == null) { throw new NullPointerException(); } ensureModesIsMutable(); modes_.add(value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; * @param values The modes to add. * @return This builder for chaining. */ public Builder addAllModes( java.lang.Iterable values) { ensureModesIsMutable(); for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto value : values) { modes_.add(value.getNumber()); } onChanged(); return this; } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; * @return This builder for chaining. */ public Builder clearModes() { modes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000020); onChanged(); return this; } private java.util.List storageTypes_ = java.util.Collections.emptyList(); private void ensureStorageTypesIsMutable() { if (!((bitField0_ & 0x00000040) != 0)) { storageTypes_ = new java.util.ArrayList(storageTypes_); bitField0_ |= 0x00000040; } } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return A list containing the storageTypes. */ public java.util.List getStorageTypesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return The count of storageTypes. */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @param index The index of the element to return. * @return The storageTypes at the given index. */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_converter_.convert(storageTypes_.get(index)); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @param index The index to set the value at. * @param value The storageTypes to set. * @return This builder for chaining. */ public Builder setStorageTypes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.set(index, value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @param value The storageTypes to add. * @return This builder for chaining. 
*/ public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.add(value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @param values The storageTypes to add. * @return This builder for chaining. */ public Builder addAllStorageTypes( java.lang.Iterable values) { ensureStorageTypesIsMutable(); for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) { storageTypes_.add(value.getNumber()); } onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; * @return This builder for chaining. */ public Builder clearStorageTypes() { storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000040); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.LazyStringList storageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; private void ensureStorageIdsIsMutable() { if (!((bitField0_ & 0x00000080) != 0)) { storageIds_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(storageIds_); bitField0_ |= 0x00000080; } } /** * repeated string storageIds = 8; * @return A list containing the storageIds. */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getStorageIdsList() { return storageIds_.getUnmodifiableView(); } /** * repeated string storageIds = 8; * @return The count of storageIds. */ public int getStorageIdsCount() { return storageIds_.size(); } /** * repeated string storageIds = 8; * @param index The index of the element to return. * @return The storageIds at the given index. */ public java.lang.String getStorageIds(int index) { return storageIds_.get(index); } /** * repeated string storageIds = 8; * @param index The index of the value to return. * @return The bytes of the storageIds at the given index. */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageIdsBytes(int index) { return storageIds_.getByteString(index); } /** * repeated string storageIds = 8; * @param index The index to set the value at. * @param value The storageIds to set. * @return This builder for chaining. */ public Builder setStorageIds( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageIdsIsMutable(); storageIds_.set(index, value); onChanged(); return this; } /** * repeated string storageIds = 8; * @param value The storageIds to add. * @return This builder for chaining. */ public Builder addStorageIds( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageIdsIsMutable(); storageIds_.add(value); onChanged(); return this; } /** * repeated string storageIds = 8; * @param values The storageIds to add. * @return This builder for chaining. */ public Builder addAllStorageIds( java.lang.Iterable values) { ensureStorageIdsIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, storageIds_); onChanged(); return this; } /** * repeated string storageIds = 8; * @return This builder for chaining. */ public Builder clearStorageIds() { storageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000080); onChanged(); return this; } /** * repeated string storageIds = 8; * @param value The bytes of the storageIds to add. * @return This builder for chaining. 
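A minimal usage sketch of how the BlockTokenSecretProto builder setters shown around this point are typically chained. The values are illustrative placeholders, and AccessModeProto.READ is assumed to be one of the generated constants for the modes field; all of these fields are optional or repeated, so build() succeeds without them being complete.

// Usage sketch with placeholder values; AccessModeProto.READ assumed from hdfs.proto.
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

public final class BlockTokenSecretSketch {
  public static HdfsProtos.BlockTokenSecretProto sample() {
    return HdfsProtos.BlockTokenSecretProto.newBuilder()
        .setBlockPoolId("BP-example")                       // optional string blockPoolId = 4
        .setBlockId(1073741825L)                            // optional uint64 blockId = 5
        .addModes(HdfsProtos.AccessModeProto.READ)          // repeated AccessModeProto modes = 6
        .addStorageTypes(HdfsProtos.StorageTypeProto.DISK)  // repeated StorageTypeProto storageTypes = 7
        .addStorageIds("DS-example-storage")                // repeated string storageIds = 8
        .build();
  }
}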
*/ public Builder addStorageIdsBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureStorageIdsIsMutable(); storageIds_.add(value); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString handshakeSecret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes handshakeSecret = 9; * @return Whether the handshakeSecret field is set. */ @java.lang.Override public boolean hasHandshakeSecret() { return ((bitField0_ & 0x00000100) != 0); } /** * optional bytes handshakeSecret = 9; * @return The handshakeSecret. */ @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.ByteString getHandshakeSecret() { return handshakeSecret_; } /** * optional bytes handshakeSecret = 9; * @param value The handshakeSecret to set. * @return This builder for chaining. */ public Builder setHandshakeSecret(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } handshakeSecret_ = value; bitField0_ |= 0x00000100; onChanged(); return this; } /** * optional bytes handshakeSecret = 9; * @return This builder for chaining. */ public Builder clearHandshakeSecret() { bitField0_ = (bitField0_ & ~0x00000100); handshakeSecret_ = getDefaultInstance().getHandshakeSecret(); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockTokenSecretProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockTokenSecretProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public BlockTokenSecretProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface RouterFederatedStateProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.RouterFederatedStateProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
     * Last seen state IDs for multiple namespaces.
     * 
* * map<string, int64> namespaceStateIds = 1; */ int getNamespaceStateIdsCount(); /** *
     * Last seen state IDs for multiple namespaces.
     * 
 * * map&lt;string, int64&gt; namespaceStateIds = 1; */ boolean containsNamespaceStateIds( java.lang.String key); /** * Use {@link #getNamespaceStateIdsMap()} instead. */ @java.lang.Deprecated java.util.Map&lt;java.lang.String, java.lang.Long&gt; getNamespaceStateIds(); /** *
     * Last seen state IDs for multiple namespaces.
     * 
 * * map&lt;string, int64&gt; namespaceStateIds = 1; */ java.util.Map&lt;java.lang.String, java.lang.Long&gt; getNamespaceStateIdsMap(); /** *
     * Last seen state IDs for multiple namespaces.
     * 
* * map<string, int64> namespaceStateIds = 1; */ long getNamespaceStateIdsOrDefault( java.lang.String key, long defaultValue); /** *
     * Last seen state IDs for multiple namespaces.
     * 
* * map<string, int64> namespaceStateIds = 1; */ long getNamespaceStateIdsOrThrow( java.lang.String key); } /** *
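A minimal read-side sketch of the accessors declared by RouterFederatedStateProtoOrBuilder above. The nameservice id passed in is an arbitrary placeholder.

// Usage sketch: look up one namespace's last-seen state id.
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto;

public final class FederatedStateReadSketch {
  public static long lastSeenStateId(RouterFederatedStateProto state, String nameserviceId) {
    // The OrDefault accessor avoids the IllegalArgumentException that the
    // OrThrow variant raises when the key is absent.
    return state.getNamespaceStateIdsOrDefault(nameserviceId, -1L);
  }
}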
   **
   * Clients should receive this message in RPC responses and forward it
   * in RPC requests without interpreting it. It should be encoded
   * as an obscure byte array when being sent to clients.
   * 
* * Protobuf type {@code hadoop.hdfs.RouterFederatedStateProto} */ public static final class RouterFederatedStateProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.RouterFederatedStateProto) RouterFederatedStateProtoOrBuilder { private static final long serialVersionUID = 0L; // Use RouterFederatedStateProto.newBuilder() to construct. private RouterFederatedStateProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private RouterFederatedStateProto() { } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new RouterFederatedStateProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor; } @SuppressWarnings({"rawtypes"}) @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.MapField internalGetMapField( int number) { switch (number) { case 1: return internalGetNamespaceStateIds(); default: throw new RuntimeException( "Invalid map field number: " + number); } } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RouterFederatedStateProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.Builder.class); } public static final int NAMESPACESTATEIDS_FIELD_NUMBER = 1; private static final class NamespaceStateIdsDefaultEntryHolder { static final org.apache.hadoop.thirdparty.protobuf.MapEntry< java.lang.String, java.lang.Long> defaultEntry = org.apache.hadoop.thirdparty.protobuf.MapEntry .newDefaultInstance( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RouterFederatedStateProto_NamespaceStateIdsEntry_descriptor, org.apache.hadoop.thirdparty.protobuf.WireFormat.FieldType.STRING, "", org.apache.hadoop.thirdparty.protobuf.WireFormat.FieldType.INT64, 0L); } @SuppressWarnings("serial") private org.apache.hadoop.thirdparty.protobuf.MapField< java.lang.String, java.lang.Long> namespaceStateIds_; private org.apache.hadoop.thirdparty.protobuf.MapField internalGetNamespaceStateIds() { if (namespaceStateIds_ == null) { return org.apache.hadoop.thirdparty.protobuf.MapField.emptyMapField( NamespaceStateIdsDefaultEntryHolder.defaultEntry); } return namespaceStateIds_; } public int getNamespaceStateIdsCount() { return internalGetNamespaceStateIds().getMap().size(); } /** *
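Per the class comment above, clients are expected to carry this state as an opaque byte array. A minimal sketch of that pass-through, assuming the standard protobuf-java toByteString() method together with the parseFrom(ByteString) overload declared later in this class:

// Usage sketch: the client keeps the bytes opaque, the router decodes them.
import org.apache.hadoop.thirdparty.protobuf.ByteString;
import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto;

public final class FederatedStatePassThroughSketch {
  // Client side: forward the bytes unchanged, without interpreting them.
  public static ByteString asOpaqueBytes(RouterFederatedStateProto state) {
    return state.toByteString();
  }

  // Router side: reconstruct the message it handed out earlier.
  public static RouterFederatedStateProto decode(ByteString opaque)
      throws InvalidProtocolBufferException {
    return RouterFederatedStateProto.parseFrom(opaque);
  }
}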
     * Last seen state IDs for multiple namespaces.
     * 
 * * map&lt;string, int64&gt; namespaceStateIds = 1; */ @java.lang.Override public boolean containsNamespaceStateIds( java.lang.String key) { if (key == null) { throw new NullPointerException("map key"); } return internalGetNamespaceStateIds().getMap().containsKey(key); } /** * Use {@link #getNamespaceStateIdsMap()} instead. */ @java.lang.Override @java.lang.Deprecated public java.util.Map&lt;java.lang.String, java.lang.Long&gt; getNamespaceStateIds() { return getNamespaceStateIdsMap(); } /** *
     * Last seen state IDs for multiple namespaces.
     * 
 * * map&lt;string, int64&gt; namespaceStateIds = 1; */ @java.lang.Override public java.util.Map&lt;java.lang.String, java.lang.Long&gt; getNamespaceStateIdsMap() { return internalGetNamespaceStateIds().getMap(); } /** *
     * Last seen state IDs for multiple namespaces.
     * 
 * * map&lt;string, int64&gt; namespaceStateIds = 1; */ @java.lang.Override public long getNamespaceStateIdsOrDefault( java.lang.String key, long defaultValue) { if (key == null) { throw new NullPointerException("map key"); } java.util.Map&lt;java.lang.String, java.lang.Long&gt; map = internalGetNamespaceStateIds().getMap(); return map.containsKey(key) ? map.get(key) : defaultValue; } /** *
     * Last seen state IDs for multiple namespaces.
     * 
* * map<string, int64> namespaceStateIds = 1; */ @java.lang.Override public long getNamespaceStateIdsOrThrow( java.lang.String key) { if (key == null) { throw new NullPointerException("map key"); } java.util.Map map = internalGetNamespaceStateIds().getMap(); if (!map.containsKey(key)) { throw new java.lang.IllegalArgumentException(); } return map.get(key); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .serializeStringMapTo( output, internalGetNamespaceStateIds(), NamespaceStateIdsDefaultEntryHolder.defaultEntry, 1); getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (java.util.Map.Entry entry : internalGetNamespaceStateIds().getMap().entrySet()) { org.apache.hadoop.thirdparty.protobuf.MapEntry namespaceStateIds__ = NamespaceStateIdsDefaultEntryHolder.defaultEntry.newBuilderForType() .setKey(entry.getKey()) .setValue(entry.getValue()) .build(); size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, namespaceStateIds__); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto) obj; if (!internalGetNamespaceStateIds().equals( other.internalGetNamespaceStateIds())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (!internalGetNamespaceStateIds().getMap().isEmpty()) { hash = (37 * hash) + NAMESPACESTATEIDS_FIELD_NUMBER; hash = (53 * hash) + internalGetNamespaceStateIds().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Clients should receive this message in RPC responses and forward it
     * in RPC requests without interpreting it. It should be encoded
     * as an obscure byte array when being sent to clients.
     * 
* * Protobuf type {@code hadoop.hdfs.RouterFederatedStateProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.RouterFederatedStateProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor; } @SuppressWarnings({"rawtypes"}) protected org.apache.hadoop.thirdparty.protobuf.MapField internalGetMapField( int number) { switch (number) { case 1: return internalGetNamespaceStateIds(); default: throw new RuntimeException( "Invalid map field number: " + number); } } @SuppressWarnings({"rawtypes"}) protected org.apache.hadoop.thirdparty.protobuf.MapField internalGetMutableMapField( int number) { switch (number) { case 1: return internalGetMutableNamespaceStateIds(); default: throw new RuntimeException( "Invalid map field number: " + number); } } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RouterFederatedStateProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; internalGetMutableNamespaceStateIds().clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto result) { int from_bitField0_ = bitField0_; if (((from_bitField0_ & 0x00000001) != 0)) { result.namespaceStateIds_ = internalGetNamespaceStateIds(); result.namespaceStateIds_.makeImmutable(); } } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder 
setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.getDefaultInstance()) return this; internalGetMutableNamespaceStateIds().mergeFrom( other.internalGetNamespaceStateIds()); bitField0_ |= 0x00000001; this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.MapEntry namespaceStateIds__ = input.readMessage( NamespaceStateIdsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); internalGetMutableNamespaceStateIds().getMutableMap().put( namespaceStateIds__.getKey(), namespaceStateIds__.getValue()); bitField0_ |= 0x00000001; break; } // case 10 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.MapField< java.lang.String, java.lang.Long> namespaceStateIds_; private org.apache.hadoop.thirdparty.protobuf.MapField internalGetNamespaceStateIds() { if (namespaceStateIds_ == null) { return org.apache.hadoop.thirdparty.protobuf.MapField.emptyMapField( NamespaceStateIdsDefaultEntryHolder.defaultEntry); } return namespaceStateIds_; } private org.apache.hadoop.thirdparty.protobuf.MapField internalGetMutableNamespaceStateIds() { if (namespaceStateIds_ == null) { namespaceStateIds_ = org.apache.hadoop.thirdparty.protobuf.MapField.newMapField( NamespaceStateIdsDefaultEntryHolder.defaultEntry); } if (!namespaceStateIds_.isMutable()) { namespaceStateIds_ = 
namespaceStateIds_.copy(); } bitField0_ |= 0x00000001; onChanged(); return namespaceStateIds_; } public int getNamespaceStateIdsCount() { return internalGetNamespaceStateIds().getMap().size(); } /** *
       * Last seen state IDs for multiple namespaces.
       * 
 * * map&lt;string, int64&gt; namespaceStateIds = 1; */ @java.lang.Override public boolean containsNamespaceStateIds( java.lang.String key) { if (key == null) { throw new NullPointerException("map key"); } return internalGetNamespaceStateIds().getMap().containsKey(key); } /** * Use {@link #getNamespaceStateIdsMap()} instead. */ @java.lang.Override @java.lang.Deprecated public java.util.Map&lt;java.lang.String, java.lang.Long&gt; getNamespaceStateIds() { return getNamespaceStateIdsMap(); } /** *
       * Last seen state IDs for multiple namespaces.
       * 
 * * map&lt;string, int64&gt; namespaceStateIds = 1; */ @java.lang.Override public java.util.Map&lt;java.lang.String, java.lang.Long&gt; getNamespaceStateIdsMap() { return internalGetNamespaceStateIds().getMap(); } /** *
       * Last seen state IDs for multiple namespaces.
       * 
 * * map&lt;string, int64&gt; namespaceStateIds = 1; */ @java.lang.Override public long getNamespaceStateIdsOrDefault( java.lang.String key, long defaultValue) { if (key == null) { throw new NullPointerException("map key"); } java.util.Map&lt;java.lang.String, java.lang.Long&gt; map = internalGetNamespaceStateIds().getMap(); return map.containsKey(key) ? map.get(key) : defaultValue; } /** *
       * Last seen state IDs for multiple namespaces.
       * 
 * * map&lt;string, int64&gt; namespaceStateIds = 1; */ @java.lang.Override public long getNamespaceStateIdsOrThrow( java.lang.String key) { if (key == null) { throw new NullPointerException("map key"); } java.util.Map&lt;java.lang.String, java.lang.Long&gt; map = internalGetNamespaceStateIds().getMap(); if (!map.containsKey(key)) { throw new java.lang.IllegalArgumentException(); } return map.get(key); } public Builder clearNamespaceStateIds() { bitField0_ = (bitField0_ & ~0x00000001); internalGetMutableNamespaceStateIds().getMutableMap() .clear(); return this; } /** *
       * Last seen state IDs for multiple namespaces.
       * 
 * * map&lt;string, int64&gt; namespaceStateIds = 1; */ public Builder removeNamespaceStateIds( java.lang.String key) { if (key == null) { throw new NullPointerException("map key"); } internalGetMutableNamespaceStateIds().getMutableMap() .remove(key); return this; } /** * Use alternate mutation accessors instead. */ @java.lang.Deprecated public java.util.Map&lt;java.lang.String, java.lang.Long&gt; getMutableNamespaceStateIds() { bitField0_ |= 0x00000001; return internalGetMutableNamespaceStateIds().getMutableMap(); } /** *
       * Last seen state IDs for multiple namespaces.
       * 
* * map<string, int64> namespaceStateIds = 1; */ public Builder putNamespaceStateIds( java.lang.String key, long value) { if (key == null) { throw new NullPointerException("map key"); } internalGetMutableNamespaceStateIds().getMutableMap() .put(key, value); bitField0_ |= 0x00000001; return this; } /** *
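A minimal sketch of updating the namespaceStateIds map through the Builder methods shown above (putNamespaceStateIds, removeNamespaceStateIds). The namespace ids and state id are placeholders.

// Usage sketch: bump one namespace's state id and drop another entry.
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto;

public final class FederatedStateBuildSketch {
  public static RouterFederatedStateProto advance(RouterFederatedStateProto previous) {
    return previous.toBuilder()
        .putNamespaceStateIds("ns0", 42L)              // last-seen state id for namespace "ns0"
        .removeNamespaceStateIds("ns-decommissioned")  // removing an absent key is a no-op
        .build();
  }
}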
       * Last seen state IDs for multiple namespaces.
       * 
* * map<string, int64> namespaceStateIds = 1; */ public Builder putAllNamespaceStateIds( java.util.Map values) { internalGetMutableNamespaceStateIds().getMutableMap() .putAll(values); bitField0_ |= 0x00000001; return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RouterFederatedStateProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RouterFederatedStateProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public RouterFederatedStateProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeIDProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor 
internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageReportProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ContentSummaryProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_QuotaUsageProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageTypesProto_descriptor; private static final 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_LocatedBlockProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_BatchedListingKeyProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CipherOptionProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable 
internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ECSchemaProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DirectoryListingProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_RemoteExceptionProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_BatchedDirectoryListingProto_fieldAccessorTable; private static 
final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotStatusProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotStatusProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotListingProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotListingProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BlockProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable; private static final 
org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageUuidsProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_BlockTokenSecretProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_RouterFederatedStateProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RouterFederatedStateProto_NamespaceStateIdsEntry_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_RouterFederatedStateProto_NamespaceStateIdsEntry_fieldAccessorTable; public static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } private static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { "\n\nhdfs.proto\022\013hadoop.hdfs\032\016Security.prot" + "o\032\tacl.proto\"c\n\022ExtendedBlockProto\022\016\n\006po" + "olId\030\001 \002(\t\022\017\n\007blockId\030\002 \002(\004\022\027\n\017generatio" + "nStamp\030\003 \002(\004\022\023\n\010numBytes\030\004 \001(\004:\0010\"[\n\034Pro" + "videdStorageLocationProto\022\014\n\004path\030\001 \002(\t\022" + "\016\n\006offset\030\002 \002(\003\022\016\n\006length\030\003 \002(\003\022\r\n\005nonce" + "\030\004 \002(\014\"\231\001\n\017DatanodeIDProto\022\016\n\006ipAddr\030\001 \002" + "(\t\022\020\n\010hostName\030\002 \002(\t\022\024\n\014datanodeUuid\030\003 \002" + "(\t\022\020\n\010xferPort\030\004 \002(\r\022\020\n\010infoPort\030\005 \002(\r\022\017" + "\n\007ipcPort\030\006 \002(\r\022\031\n\016infoSecurePort\030\007 \001(\r:" + "\0010\"X\n\026DatanodeLocalInfoProto\022\027\n\017software" + "Version\030\001 \002(\t\022\025\n\rconfigVersion\030\002 \002(\t\022\016\n\006" + "uptime\030\003 \002(\004\"\315\001\n\027DatanodeVolumeInfoProto" + "\022\014\n\004path\030\001 \002(\t\0222\n\013storageType\030\002 \002(\0162\035.ha" + "doop.hdfs.StorageTypeProto\022\021\n\tusedSpace\030" + "\003 \002(\004\022\021\n\tfreeSpace\030\004 \002(\004\022\025\n\rreservedSpac" + "e\030\005 \002(\004\022 \n\030reservedSpaceForReplicas\030\006 \002(" + "\004\022\021\n\tnumBlocks\030\007 \002(\004\"G\n\022DatanodeInfosPro" + "to\0221\n\tdatanodes\030\001 \003(\0132\036.hadoop.hdfs.Data" + "nodeInfoProto\"\356\004\n\021DatanodeInfoProto\022(\n\002i" + "d\030\001 \002(\0132\034.hadoop.hdfs.DatanodeIDProto\022\023\n" + "\010capacity\030\002 \001(\004:\0010\022\022\n\007dfsUsed\030\003 \001(\004:\0010\022\024" + "\n\tremaining\030\004 
\001(\004:\0010\022\030\n\rblockPoolUsed\030\005 " + "\001(\004:\0010\022\025\n\nlastUpdate\030\006 \001(\004:\0010\022\027\n\014xceiver" + "Count\030\007 \001(\r:\0010\022\020\n\010location\030\010 \001(\t\022\022\n\nnonD" + "fsUsed\030\t \001(\004\022E\n\nadminState\030\n \001(\0162).hadoo" + "p.hdfs.DatanodeInfoProto.AdminState:\006NOR" + "MAL\022\030\n\rcacheCapacity\030\013 \001(\004:\0010\022\024\n\tcacheUs" + "ed\030\014 \001(\004:\0010\022\036\n\023lastUpdateMonotonic\030\r \001(\004" + ":\0010\022\025\n\rupgradeDomain\030\016 \001(\t\022\036\n\023lastBlockR" + "eportTime\030\017 \001(\004:\0010\022#\n\030lastBlockReportMon" + "otonic\030\020 \001(\004:\0010\022\024\n\tnumBlocks\030\021 \001(\r:\0010\"w\n" + "\nAdminState\022\n\n\006NORMAL\020\000\022\033\n\027DECOMMISSION_" + "INPROGRESS\020\001\022\022\n\016DECOMMISSIONED\020\002\022\030\n\024ENTE" + "RING_MAINTENANCE\020\003\022\022\n\016IN_MAINTENANCE\020\004\"\336" + "\001\n\024DatanodeStorageProto\022\023\n\013storageUuid\030\001" + " \002(\t\022E\n\005state\030\002 \001(\0162..hadoop.hdfs.Datano" + "deStorageProto.StorageState:\006NORMAL\0228\n\013s" + "torageType\030\003 \001(\0162\035.hadoop.hdfs.StorageTy" + "peProto:\004DISK\"0\n\014StorageState\022\n\n\006NORMAL\020" + "\000\022\024\n\020READ_ONLY_SHARED\020\001\"\364\001\n\022StorageRepor" + "tProto\022\027\n\013storageUuid\030\001 \002(\tB\002\030\001\022\025\n\006faile" + "d\030\002 \001(\010:\005false\022\023\n\010capacity\030\003 \001(\004:\0010\022\022\n\007d" + "fsUsed\030\004 \001(\004:\0010\022\024\n\tremaining\030\005 \001(\004:\0010\022\030\n" + "\rblockPoolUsed\030\006 \001(\004:\0010\0222\n\007storage\030\007 \001(\013" + "2!.hadoop.hdfs.DatanodeStorageProto\022\022\n\nn" + "onDfsUsed\030\010 \001(\004\022\r\n\005mount\030\t \001(\t\"\332\002\n\023Conte" + "ntSummaryProto\022\016\n\006length\030\001 \002(\004\022\021\n\tfileCo" + "unt\030\002 \002(\004\022\026\n\016directoryCount\030\003 \002(\004\022\r\n\005quo" + "ta\030\004 \002(\004\022\025\n\rspaceConsumed\030\005 \002(\004\022\022\n\nspace" + "Quota\030\006 \002(\004\022?\n\016typeQuotaInfos\030\007 \001(\0132\'.ha" + "doop.hdfs.StorageTypeQuotaInfosProto\022\026\n\016" + "snapshotLength\030\010 \001(\004\022\031\n\021snapshotFileCoun" + "t\030\t \001(\004\022\036\n\026snapshotDirectoryCount\030\n \001(\004\022" + "\035\n\025snapshotSpaceConsumed\030\013 \001(\004\022\033\n\023erasur" + "eCodingPolicy\030\014 \001(\t\"\253\001\n\017QuotaUsageProto\022" + "\035\n\025fileAndDirectoryCount\030\001 \002(\004\022\r\n\005quota\030" + "\002 \002(\004\022\025\n\rspaceConsumed\030\003 \002(\004\022\022\n\nspaceQuo" + "ta\030\004 \002(\004\022?\n\016typeQuotaInfos\030\005 \001(\0132\'.hadoo" + "p.hdfs.StorageTypeQuotaInfosProto\"[\n\032Sto" + "rageTypeQuotaInfosProto\022=\n\rtypeQuotaInfo" + "\030\001 \003(\0132&.hadoop.hdfs.StorageTypeQuotaInf" + "oProto\"o\n\031StorageTypeQuotaInfoProto\0221\n\004t" + "ype\030\001 \001(\0162\035.hadoop.hdfs.StorageTypeProto" + ":\004DISK\022\r\n\005quota\030\002 \002(\004\022\020\n\010consumed\030\003 \002(\004\"" + "7\n\026CorruptFileBlocksProto\022\r\n\005files\030\001 \003(\t" + "\022\016\n\006cookie\030\002 \002(\t\"H\n\021StorageTypesProto\0223\n" + "\014storageTypes\030\001 \003(\0162\035.hadoop.hdfs.Storag" + "eTypeProto\"\364\001\n\027BlockStoragePolicyProto\022\020" + "\n\010policyId\030\001 \002(\r\022\014\n\004name\030\002 \002(\t\0226\n\016creati" + "onPolicy\030\003 \002(\0132\036.hadoop.hdfs.StorageType" + "sProto\022>\n\026creationFallbackPolicy\030\004 
\001(\0132\036" + ".hadoop.hdfs.StorageTypesProto\022A\n\031replic" + "ationFallbackPolicy\030\005 \001(\0132\036.hadoop.hdfs." + "StorageTypesProto\"\342\002\n\021LocatedBlockProto\022" + "*\n\001b\030\001 \002(\0132\037.hadoop.hdfs.ExtendedBlockPr" + "oto\022\016\n\006offset\030\002 \002(\004\022,\n\004locs\030\003 \003(\0132\036.hado" + "op.hdfs.DatanodeInfoProto\022\017\n\007corrupt\030\004 \002" + "(\010\022-\n\nblockToken\030\005 \002(\0132\031.hadoop.common.T" + "okenProto\022\024\n\010isCached\030\006 \003(\010B\002\020\001\0223\n\014stora" + "geTypes\030\007 \003(\0162\035.hadoop.hdfs.StorageTypeP" + "roto\022\022\n\nstorageIDs\030\010 \003(\t\022\024\n\014blockIndices" + "\030\t \001(\014\022.\n\013blockTokens\030\n \003(\0132\031.hadoop.com" + "mon.TokenProto\"Q\n\026BatchedListingKeyProto" + "\022\020\n\010checksum\030\001 \002(\014\022\021\n\tpathIndex\030\002 \002(\r\022\022\n" + "\nstartAfter\030\003 \002(\014\"\223\001\n\026DataEncryptionKeyP" + "roto\022\r\n\005keyId\030\001 \002(\r\022\023\n\013blockPoolId\030\002 \002(\t" + "\022\r\n\005nonce\030\003 \002(\014\022\025\n\rencryptionKey\030\004 \002(\014\022\022" + "\n\nexpiryDate\030\005 \002(\004\022\033\n\023encryptionAlgorith" + "m\030\006 \001(\t\"\323\001\n\027FileEncryptionInfoProto\022,\n\005s" + "uite\030\001 \002(\0162\035.hadoop.hdfs.CipherSuiteProt" + "o\022F\n\025cryptoProtocolVersion\030\002 \002(\0162\'.hadoo" + "p.hdfs.CryptoProtocolVersionProto\022\013\n\003key" + "\030\003 \002(\014\022\n\n\002iv\030\004 \002(\014\022\017\n\007keyName\030\005 \002(\t\022\030\n\020e" + "zKeyVersionName\030\006 \002(\t\"O\n\032PerFileEncrypti" + "onInfoProto\022\013\n\003key\030\001 \002(\014\022\n\n\002iv\030\002 \002(\014\022\030\n\020" + "ezKeyVersionName\030\003 \002(\t\"\337\001\n\027ZoneEncryptio" + "nInfoProto\022,\n\005suite\030\001 \002(\0162\035.hadoop.hdfs." + "CipherSuiteProto\022F\n\025cryptoProtocolVersio" + "n\030\002 \002(\0162\'.hadoop.hdfs.CryptoProtocolVers" + "ionProto\022\017\n\007keyName\030\003 \002(\t\022=\n\021reencryptio" + "nProto\030\004 \001(\0132\".hadoop.hdfs.ReencryptionI" + "nfoProto\"\262\001\n\025ReencryptionInfoProto\022\030\n\020ez" + "KeyVersionName\030\001 \002(\t\022\026\n\016submissionTime\030\002" + " \002(\004\022\020\n\010canceled\030\003 \002(\010\022\026\n\016numReencrypted" + "\030\004 \002(\003\022\023\n\013numFailures\030\005 \002(\003\022\026\n\016completio" + "nTime\030\006 \001(\004\022\020\n\010lastFile\030\007 \001(\t\"}\n\021CipherO" + "ptionProto\022,\n\005suite\030\001 \002(\0162\035.hadoop.hdfs." 
+ "CipherSuiteProto\022\r\n\005inKey\030\002 \001(\014\022\014\n\004inIv\030" + "\003 \001(\014\022\016\n\006outKey\030\004 \001(\014\022\r\n\005outIv\030\005 \001(\014\"\276\002\n" + "\022LocatedBlocksProto\022\022\n\nfileLength\030\001 \002(\004\022" + ".\n\006blocks\030\002 \003(\0132\036.hadoop.hdfs.LocatedBlo" + "ckProto\022\031\n\021underConstruction\030\003 \002(\010\0221\n\tla" + "stBlock\030\004 \001(\0132\036.hadoop.hdfs.LocatedBlock" + "Proto\022\033\n\023isLastBlockComplete\030\005 \002(\010\022@\n\022fi" + "leEncryptionInfo\030\006 \001(\0132$.hadoop.hdfs.Fil" + "eEncryptionInfoProto\0227\n\010ecPolicy\030\007 \001(\0132%" + ".hadoop.hdfs.ErasureCodingPolicyProto\"6\n" + "\030ECSchemaOptionEntryProto\022\013\n\003key\030\001 \002(\t\022\r" + "\n\005value\030\002 \002(\t\"\202\001\n\rECSchemaProto\022\021\n\tcodec" + "Name\030\001 \002(\t\022\021\n\tdataUnits\030\002 \002(\r\022\023\n\013parityU" + "nits\030\003 \002(\r\0226\n\007options\030\004 \003(\0132%.hadoop.hdf" + "s.ECSchemaOptionEntryProto\"\261\001\n\030ErasureCo" + "dingPolicyProto\022\014\n\004name\030\001 \001(\t\022*\n\006schema\030" + "\002 \001(\0132\032.hadoop.hdfs.ECSchemaProto\022\020\n\010cel" + "lSize\030\003 \001(\r\022\n\n\002id\030\004 \002(\r\022=\n\005state\030\005 \001(\0162%" + ".hadoop.hdfs.ErasureCodingPolicyState:\007E" + "NABLED\"\177\n#AddErasureCodingPolicyResponse" + "Proto\0225\n\006policy\030\001 \002(\0132%.hadoop.hdfs.Eras" + "ureCodingPolicyProto\022\017\n\007succeed\030\002 \002(\010\022\020\n" + "\010errorMsg\030\003 \001(\t\"K\n\035ECTopologyVerifierRes" + "ultProto\022\025\n\rresultMessage\030\001 \002(\t\022\023\n\013isSup" + "ported\030\002 \002(\010\"C\n\023HdfsPathHandleProto\022\017\n\007i" + "nodeId\030\001 \001(\004\022\r\n\005mtime\030\002 \001(\004\022\014\n\004path\030\003 \001(" + "\t\"\315\005\n\023HdfsFileStatusProto\022;\n\010fileType\030\001 " + "\002(\0162).hadoop.hdfs.HdfsFileStatusProto.Fi" + "leType\022\014\n\004path\030\002 \002(\014\022\016\n\006length\030\003 \002(\004\0222\n\n" + "permission\030\004 \002(\0132\036.hadoop.hdfs.FsPermiss" + "ionProto\022\r\n\005owner\030\005 \002(\t\022\r\n\005group\030\006 \002(\t\022\031" + "\n\021modification_time\030\007 \002(\004\022\023\n\013access_time" + "\030\010 \002(\004\022\017\n\007symlink\030\t \001(\014\022\034\n\021block_replica" + "tion\030\n \001(\r:\0010\022\024\n\tblocksize\030\013 \001(\004:\0010\0222\n\tl" + "ocations\030\014 \001(\0132\037.hadoop.hdfs.LocatedBloc" + "ksProto\022\021\n\006fileId\030\r \001(\004:\0010\022\027\n\013childrenNu" + "m\030\016 \001(\005:\002-1\022@\n\022fileEncryptionInfo\030\017 \001(\0132" + "$.hadoop.hdfs.FileEncryptionInfoProto\022\030\n" + "\rstoragePolicy\030\020 \001(\r:\0010\0227\n\010ecPolicy\030\021 \001(" + "\0132%.hadoop.hdfs.ErasureCodingPolicyProto" + "\022\020\n\005flags\030\022 \001(\r:\0010\022\021\n\tnamespace\030\023 \001(\t\"3\n" + "\010FileType\022\n\n\006IS_DIR\020\001\022\013\n\007IS_FILE\020\002\022\016\n\nIS" + "_SYMLINK\020\003\"E\n\005Flags\022\013\n\007HAS_ACL\020\001\022\r\n\tHAS_" + "CRYPT\020\002\022\n\n\006HAS_EC\020\004\022\024\n\020SNAPSHOT_ENABLED\020" + "\010\"y\n\031BlockChecksumOptionsProto\022F\n\021blockC" + "hecksumType\030\001 \001(\0162#.hadoop.hdfs.BlockChe" + "cksumTypeProto:\006MD5CRC\022\024\n\014stripeLength\030\002" + " \001(\004\"\344\002\n\025FsServerDefaultsProto\022\021\n\tblockS" + "ize\030\001 \002(\004\022\030\n\020bytesPerChecksum\030\002 \002(\r\022\027\n\017w" + "ritePacketSize\030\003 
\002(\r\022\023\n\013replication\030\004 \002(" + "\r\022\026\n\016fileBufferSize\030\005 \002(\r\022\"\n\023encryptData" + "Transfer\030\006 \001(\010:\005false\022\030\n\rtrashInterval\030\007" + " \001(\004:\0010\022D\n\014checksumType\030\010 \001(\0162\036.hadoop.h" + "dfs.ChecksumTypeProto:\016CHECKSUM_CRC32\022\026\n" + "\016keyProviderUri\030\t \001(\t\022\023\n\010policyId\030\n \001(\r:" + "\0010\022\'\n\030snapshotTrashRootEnabled\030\013 \001(\010:\005fa" + "lse\"k\n\025DirectoryListingProto\0228\n\016partialL" + "isting\030\001 \003(\0132 .hadoop.hdfs.HdfsFileStatu" + "sProto\022\030\n\020remainingEntries\030\002 \002(\r\":\n\024Remo" + "teExceptionProto\022\021\n\tclassName\030\001 \002(\t\022\017\n\007m" + "essage\030\002 \001(\t\"\241\001\n\034BatchedDirectoryListing" + "Proto\0228\n\016partialListing\030\001 \003(\0132 .hadoop.h" + "dfs.HdfsFileStatusProto\022\021\n\tparentIdx\030\002 \002" + "(\r\0224\n\texception\030\003 \001(\0132!.hadoop.hdfs.Remo" + "teExceptionProto\"\242\001\n!SnapshottableDirect" + "oryStatusProto\0223\n\tdirStatus\030\001 \002(\0132 .hado" + "op.hdfs.HdfsFileStatusProto\022\026\n\016snapshot_" + "quota\030\002 \002(\r\022\027\n\017snapshot_number\030\003 \002(\r\022\027\n\017" + "parent_fullpath\030\004 \002(\014\"\212\001\n\023SnapshotStatus" + "Proto\0223\n\tdirStatus\030\001 \002(\0132 .hadoop.hdfs.H" + "dfsFileStatusProto\022\022\n\nsnapshotID\030\002 \002(\r\022\027" + "\n\017parent_fullpath\030\003 \002(\014\022\021\n\tisDeleted\030\004 \002" + "(\010\"u\n\"SnapshottableDirectoryListingProto" + "\022O\n\027snapshottableDirListing\030\001 \003(\0132..hado" + "op.hdfs.SnapshottableDirectoryStatusProt" + "o\"Q\n\024SnapshotListingProto\0229\n\017snapshotLis" + "ting\030\001 \003(\0132 .hadoop.hdfs.SnapshotStatusP" + "roto\"_\n\034SnapshotDiffReportEntryProto\022\020\n\010" + "fullpath\030\001 \002(\014\022\031\n\021modificationLabel\030\002 \002(" + "\t\022\022\n\ntargetPath\030\003 \001(\014\"\237\001\n\027SnapshotDiffRe" + "portProto\022\024\n\014snapshotRoot\030\001 \002(\t\022\024\n\014fromS" + "napshot\030\002 \002(\t\022\022\n\ntoSnapshot\030\003 \002(\t\022D\n\021dif" + "fReportEntries\030\004 \003(\0132).hadoop.hdfs.Snaps" + "hotDiffReportEntryProto\"\177\n#SnapshotDiffR" + "eportListingEntryProto\022\020\n\010fullpath\030\001 \002(\014" + "\022\r\n\005dirId\030\002 \002(\004\022\023\n\013isReference\030\003 \002(\010\022\022\n\n" + "targetPath\030\004 \001(\014\022\016\n\006fileId\030\005 \001(\004\"E\n\035Snap" + "shotDiffReportCursorProto\022\021\n\tstartPath\030\001" + " \002(\014\022\021\n\005index\030\002 \002(\005:\002-1\"\322\002\n\036SnapshotDiff" + "ReportListingProto\022I\n\017modifiedEntries\030\001 " + "\003(\01320.hadoop.hdfs.SnapshotDiffReportList" + "ingEntryProto\022H\n\016createdEntries\030\002 \003(\01320." 
+ "hadoop.hdfs.SnapshotDiffReportListingEnt" + "ryProto\022H\n\016deletedEntries\030\003 \003(\01320.hadoop" + ".hdfs.SnapshotDiffReportListingEntryProt" + "o\022\025\n\risFromEarlier\030\004 \002(\010\022:\n\006cursor\030\005 \001(\013" + "2*.hadoop.hdfs.SnapshotDiffReportCursorP" + "roto\"D\n\nBlockProto\022\017\n\007blockId\030\001 \002(\004\022\020\n\010g" + "enStamp\030\002 \002(\004\022\023\n\010numBytes\030\003 \001(\004:\0010\"\245\001\n\021S" + "napshotInfoProto\022\024\n\014snapshotName\030\001 \002(\t\022\024" + "\n\014snapshotRoot\030\002 \002(\t\0222\n\npermission\030\003 \002(\013" + "2\036.hadoop.hdfs.FsPermissionProto\022\r\n\005owne" + "r\030\004 \002(\t\022\r\n\005group\030\005 \002(\t\022\022\n\ncreateTime\030\006 \002" + "(\t\"J\n\031RollingUpgradeStatusProto\022\023\n\013block" + "PoolId\030\001 \002(\t\022\030\n\tfinalized\030\002 \001(\010:\005false\")" + "\n\021StorageUuidsProto\022\024\n\014storageUuids\030\001 \003(" + "\t\"\377\001\n\025BlockTokenSecretProto\022\022\n\nexpiryDat" + "e\030\001 \001(\004\022\r\n\005keyId\030\002 \001(\r\022\016\n\006userId\030\003 \001(\t\022\023" + "\n\013blockPoolId\030\004 \001(\t\022\017\n\007blockId\030\005 \001(\004\022+\n\005" + "modes\030\006 \003(\0162\034.hadoop.hdfs.AccessModeProt" + "o\0223\n\014storageTypes\030\007 \003(\0162\035.hadoop.hdfs.St" + "orageTypeProto\022\022\n\nstorageIds\030\010 \003(\t\022\027\n\017ha" + "ndshakeSecret\030\t \001(\014\"\257\001\n\031RouterFederatedS" + "tateProto\022X\n\021namespaceStateIds\030\001 \003(\0132=.h" + "adoop.hdfs.RouterFederatedStateProto.Nam" + "espaceStateIdsEntry\0328\n\026NamespaceStateIds" + "Entry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\003:\0028\001*Z\n" + "\020StorageTypeProto\022\010\n\004DISK\020\001\022\007\n\003SSD\020\002\022\013\n\007" + "ARCHIVE\020\003\022\014\n\010RAM_DISK\020\004\022\014\n\010PROVIDED\020\005\022\n\n" + "\006NVDIMM\020\006*-\n\016BlockTypeProto\022\016\n\nCONTIGUOU" + "S\020\000\022\013\n\007STRIPED\020\001*M\n\020CipherSuiteProto\022\013\n\007" + "UNKNOWN\020\001\022\025\n\021AES_CTR_NOPADDING\020\002\022\025\n\021SM4_" + "CTR_NOPADDING\020\003*P\n\032CryptoProtocolVersion" + "Proto\022\034\n\030UNKNOWN_PROTOCOL_VERSION\020\001\022\024\n\020E" + "NCRYPTION_ZONES\020\002*B\n\030ErasureCodingPolicy" + "State\022\014\n\010DISABLED\020\001\022\013\n\007ENABLED\020\002\022\013\n\007REMO" + "VED\020\003*O\n\021ChecksumTypeProto\022\021\n\rCHECKSUM_N" + "ULL\020\000\022\022\n\016CHECKSUM_CRC32\020\001\022\023\n\017CHECKSUM_CR" + "C32C\020\002*7\n\026BlockChecksumTypeProto\022\n\n\006MD5C" + "RC\020\001\022\021\n\rCOMPOSITE_CRC\020\002*=\n\017AccessModePro" + "to\022\010\n\004READ\020\001\022\t\n\005WRITE\020\002\022\010\n\004COPY\020\003\022\013\n\007REP" + "LACE\020\004B6\n%org.apache.hadoop.hdfs.protoco" + "l.protoB\nHdfsProtos\240\001\001" }; descriptor = org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(), org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor(), }); internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor, new java.lang.String[] { "PoolId", "BlockId", 
"GenerationStamp", "NumBytes", }); internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor, new java.lang.String[] { "Path", "Offset", "Length", "Nonce", }); internal_static_hadoop_hdfs_DatanodeIDProto_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeIDProto_descriptor, new java.lang.String[] { "IpAddr", "HostName", "DatanodeUuid", "XferPort", "InfoPort", "IpcPort", "InfoSecurePort", }); internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor = getDescriptor().getMessageTypes().get(3); internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor, new java.lang.String[] { "SoftwareVersion", "ConfigVersion", "Uptime", }); internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor, new java.lang.String[] { "Path", "StorageType", "UsedSpace", "FreeSpace", "ReservedSpace", "ReservedSpaceForReplicas", "NumBlocks", }); internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor = getDescriptor().getMessageTypes().get(5); internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor, new java.lang.String[] { "Datanodes", }); internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor = getDescriptor().getMessageTypes().get(6); internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor, new java.lang.String[] { "Id", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "LastUpdate", "XceiverCount", "Location", "NonDfsUsed", "AdminState", "CacheCapacity", "CacheUsed", "LastUpdateMonotonic", "UpgradeDomain", "LastBlockReportTime", "LastBlockReportMonotonic", "NumBlocks", }); internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor = getDescriptor().getMessageTypes().get(7); internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor, new java.lang.String[] { "StorageUuid", "State", "StorageType", }); internal_static_hadoop_hdfs_StorageReportProto_descriptor = getDescriptor().getMessageTypes().get(8); internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_StorageReportProto_descriptor, new java.lang.String[] { "StorageUuid", "Failed", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "Storage", "NonDfsUsed", "Mount", }); 
internal_static_hadoop_hdfs_ContentSummaryProto_descriptor = getDescriptor().getMessageTypes().get(9); internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ContentSummaryProto_descriptor, new java.lang.String[] { "Length", "FileCount", "DirectoryCount", "Quota", "SpaceConsumed", "SpaceQuota", "TypeQuotaInfos", "SnapshotLength", "SnapshotFileCount", "SnapshotDirectoryCount", "SnapshotSpaceConsumed", "ErasureCodingPolicy", }); internal_static_hadoop_hdfs_QuotaUsageProto_descriptor = getDescriptor().getMessageTypes().get(10); internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_QuotaUsageProto_descriptor, new java.lang.String[] { "FileAndDirectoryCount", "Quota", "SpaceConsumed", "SpaceQuota", "TypeQuotaInfos", }); internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor = getDescriptor().getMessageTypes().get(11); internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor, new java.lang.String[] { "TypeQuotaInfo", }); internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor = getDescriptor().getMessageTypes().get(12); internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor, new java.lang.String[] { "Type", "Quota", "Consumed", }); internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor = getDescriptor().getMessageTypes().get(13); internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor, new java.lang.String[] { "Files", "Cookie", }); internal_static_hadoop_hdfs_StorageTypesProto_descriptor = getDescriptor().getMessageTypes().get(14); internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_StorageTypesProto_descriptor, new java.lang.String[] { "StorageTypes", }); internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor = getDescriptor().getMessageTypes().get(15); internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor, new java.lang.String[] { "PolicyId", "Name", "CreationPolicy", "CreationFallbackPolicy", "ReplicationFallbackPolicy", }); internal_static_hadoop_hdfs_LocatedBlockProto_descriptor = getDescriptor().getMessageTypes().get(16); internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_LocatedBlockProto_descriptor, new java.lang.String[] { "B", "Offset", "Locs", "Corrupt", "BlockToken", "IsCached", "StorageTypes", "StorageIDs", "BlockIndices", "BlockTokens", }); internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor = getDescriptor().getMessageTypes().get(17); internal_static_hadoop_hdfs_BatchedListingKeyProto_fieldAccessorTable = new 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor, new java.lang.String[] { "Checksum", "PathIndex", "StartAfter", }); internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor = getDescriptor().getMessageTypes().get(18); internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor, new java.lang.String[] { "KeyId", "BlockPoolId", "Nonce", "EncryptionKey", "ExpiryDate", "EncryptionAlgorithm", }); internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor = getDescriptor().getMessageTypes().get(19); internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor, new java.lang.String[] { "Suite", "CryptoProtocolVersion", "Key", "Iv", "KeyName", "EzKeyVersionName", }); internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor = getDescriptor().getMessageTypes().get(20); internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor, new java.lang.String[] { "Key", "Iv", "EzKeyVersionName", }); internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor = getDescriptor().getMessageTypes().get(21); internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor, new java.lang.String[] { "Suite", "CryptoProtocolVersion", "KeyName", "ReencryptionProto", }); internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor = getDescriptor().getMessageTypes().get(22); internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor, new java.lang.String[] { "EzKeyVersionName", "SubmissionTime", "Canceled", "NumReencrypted", "NumFailures", "CompletionTime", "LastFile", }); internal_static_hadoop_hdfs_CipherOptionProto_descriptor = getDescriptor().getMessageTypes().get(23); internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_CipherOptionProto_descriptor, new java.lang.String[] { "Suite", "InKey", "InIv", "OutKey", "OutIv", }); internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor = getDescriptor().getMessageTypes().get(24); internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor, new java.lang.String[] { "FileLength", "Blocks", "UnderConstruction", "LastBlock", "IsLastBlockComplete", "FileEncryptionInfo", "EcPolicy", }); internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor = getDescriptor().getMessageTypes().get(25); internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor, new java.lang.String[] { 
"Key", "Value", }); internal_static_hadoop_hdfs_ECSchemaProto_descriptor = getDescriptor().getMessageTypes().get(26); internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ECSchemaProto_descriptor, new java.lang.String[] { "CodecName", "DataUnits", "ParityUnits", "Options", }); internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor = getDescriptor().getMessageTypes().get(27); internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor, new java.lang.String[] { "Name", "Schema", "CellSize", "Id", "State", }); internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor = getDescriptor().getMessageTypes().get(28); internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor, new java.lang.String[] { "Policy", "Succeed", "ErrorMsg", }); internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor = getDescriptor().getMessageTypes().get(29); internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor, new java.lang.String[] { "ResultMessage", "IsSupported", }); internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor = getDescriptor().getMessageTypes().get(30); internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor, new java.lang.String[] { "InodeId", "Mtime", "Path", }); internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor = getDescriptor().getMessageTypes().get(31); internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor, new java.lang.String[] { "FileType", "Path", "Length", "Permission", "Owner", "Group", "ModificationTime", "AccessTime", "Symlink", "BlockReplication", "Blocksize", "Locations", "FileId", "ChildrenNum", "FileEncryptionInfo", "StoragePolicy", "EcPolicy", "Flags", "Namespace", }); internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor = getDescriptor().getMessageTypes().get(32); internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor, new java.lang.String[] { "BlockChecksumType", "StripeLength", }); internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor = getDescriptor().getMessageTypes().get(33); internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor, new java.lang.String[] { "BlockSize", "BytesPerChecksum", "WritePacketSize", "Replication", "FileBufferSize", "EncryptDataTransfer", "TrashInterval", "ChecksumType", "KeyProviderUri", "PolicyId", "SnapshotTrashRootEnabled", }); 
internal_static_hadoop_hdfs_DirectoryListingProto_descriptor = getDescriptor().getMessageTypes().get(34); internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DirectoryListingProto_descriptor, new java.lang.String[] { "PartialListing", "RemainingEntries", }); internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor = getDescriptor().getMessageTypes().get(35); internal_static_hadoop_hdfs_RemoteExceptionProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor, new java.lang.String[] { "ClassName", "Message", }); internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor = getDescriptor().getMessageTypes().get(36); internal_static_hadoop_hdfs_BatchedDirectoryListingProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor, new java.lang.String[] { "PartialListing", "ParentIdx", "Exception", }); internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor = getDescriptor().getMessageTypes().get(37); internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor, new java.lang.String[] { "DirStatus", "SnapshotQuota", "SnapshotNumber", "ParentFullpath", }); internal_static_hadoop_hdfs_SnapshotStatusProto_descriptor = getDescriptor().getMessageTypes().get(38); internal_static_hadoop_hdfs_SnapshotStatusProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotStatusProto_descriptor, new java.lang.String[] { "DirStatus", "SnapshotID", "ParentFullpath", "IsDeleted", }); internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor = getDescriptor().getMessageTypes().get(39); internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor, new java.lang.String[] { "SnapshottableDirListing", }); internal_static_hadoop_hdfs_SnapshotListingProto_descriptor = getDescriptor().getMessageTypes().get(40); internal_static_hadoop_hdfs_SnapshotListingProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotListingProto_descriptor, new java.lang.String[] { "SnapshotListing", }); internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor = getDescriptor().getMessageTypes().get(41); internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor, new java.lang.String[] { "Fullpath", "ModificationLabel", "TargetPath", }); internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor = getDescriptor().getMessageTypes().get(42); internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor, new 
java.lang.String[] { "SnapshotRoot", "FromSnapshot", "ToSnapshot", "DiffReportEntries", }); internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor = getDescriptor().getMessageTypes().get(43); internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor, new java.lang.String[] { "Fullpath", "DirId", "IsReference", "TargetPath", "FileId", }); internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor = getDescriptor().getMessageTypes().get(44); internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor, new java.lang.String[] { "StartPath", "Index", }); internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor = getDescriptor().getMessageTypes().get(45); internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor, new java.lang.String[] { "ModifiedEntries", "CreatedEntries", "DeletedEntries", "IsFromEarlier", "Cursor", }); internal_static_hadoop_hdfs_BlockProto_descriptor = getDescriptor().getMessageTypes().get(46); internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_BlockProto_descriptor, new java.lang.String[] { "BlockId", "GenStamp", "NumBytes", }); internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor = getDescriptor().getMessageTypes().get(47); internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor, new java.lang.String[] { "SnapshotName", "SnapshotRoot", "Permission", "Owner", "Group", "CreateTime", }); internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor = getDescriptor().getMessageTypes().get(48); internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor, new java.lang.String[] { "BlockPoolId", "Finalized", }); internal_static_hadoop_hdfs_StorageUuidsProto_descriptor = getDescriptor().getMessageTypes().get(49); internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_StorageUuidsProto_descriptor, new java.lang.String[] { "StorageUuids", }); internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor = getDescriptor().getMessageTypes().get(50); internal_static_hadoop_hdfs_BlockTokenSecretProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor, new java.lang.String[] { "ExpiryDate", "KeyId", "UserId", "BlockPoolId", "BlockId", "Modes", "StorageTypes", "StorageIds", "HandshakeSecret", }); internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor = getDescriptor().getMessageTypes().get(51); internal_static_hadoop_hdfs_RouterFederatedStateProto_fieldAccessorTable = 
new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor, new java.lang.String[] { "NamespaceStateIds", }); internal_static_hadoop_hdfs_RouterFederatedStateProto_NamespaceStateIdsEntry_descriptor = internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor.getNestedTypes().get(0); internal_static_hadoop_hdfs_RouterFederatedStateProto_NamespaceStateIdsEntry_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_RouterFederatedStateProto_NamespaceStateIdsEntry_descriptor, new java.lang.String[] { "Key", "Value", }); org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(); org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor(); } // @@protoc_insertion_point(outer_class_scope) }
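The listing above closes the generated outer class. As an illustrative aside (not part of the generated file), the file descriptor assembled in the static block can be inspected at runtime through the relocated protobuf reflection API under org.apache.hadoop.thirdparty.protobuf. The sketch below assumes the hadoop-hdfs protocol classes and that shaded protobuf runtime are on the classpath; the class name HdfsProtosDescriptorDemo is made up for illustration.

// Minimal inspection sketch (illustrative only, not generated by protoc).
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.thirdparty.protobuf.Descriptors;

public class HdfsProtosDescriptorDemo {
  public static void main(String[] args) {
    // File descriptor built by the static initializer above from the
    // serialized descriptorData bytes.
    Descriptors.FileDescriptor fd = HdfsProtos.getDescriptor();

    // Message types are registered in declaration order; DatanodeInfoProto
    // is one of the messages wired up in the initializer above.
    Descriptors.Descriptor datanodeInfo = fd.findMessageTypeByName("DatanodeInfoProto");
    System.out.println(datanodeInfo.getFullName()); // hadoop.hdfs.DatanodeInfoProto

    // Field metadata mirrors hdfs.proto (field numbers and names).
    for (Descriptors.FieldDescriptor field : datanodeInfo.getFields()) {
      System.out.println(field.getNumber() + " " + field.getName());
    }

    // File-level enums, e.g. StorageTypeProto, are also exposed here.
    for (Descriptors.EnumDescriptor e : fd.getEnumTypes()) {
      System.out.println(e.getName() + " -> " + e.getValues().size() + " values");
    }
  }
}

The FieldAccessorTable entries registered in the static block are what back this kind of reflective access for the generated message and builder classes; application code normally goes through the typed getters instead.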



