// NOTE(review): the four lines below are residue from the Maven-repository web
// page this file was copied from; they are not part of the generated source.
// Kept as a comment so the file remains valid Java:
//   "All Downloads are FREE. Search and download functionalities are using the official Maven repository."
//   Artifact: org.apache.hadoop.hdfs.protocol.proto.HdfsProtos (Maven / Gradle / Ivy)
//   A newer version (3.4.1) is available upstream.
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: hdfs.proto

package org.apache.hadoop.hdfs.protocol.proto;

public final class HdfsProtos {
  private HdfsProtos() {}
  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
  }

  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
    registerAllExtensions(
        (org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
  }
  /**
   * 
   **
   * Types of recognized storage media.
   * 
* * Protobuf enum {@code hadoop.hdfs.StorageTypeProto} */ public enum StorageTypeProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * DISK = 1; */ DISK(1), /** * SSD = 2; */ SSD(2), /** * ARCHIVE = 3; */ ARCHIVE(3), /** * RAM_DISK = 4; */ RAM_DISK(4), /** * PROVIDED = 5; */ PROVIDED(5), ; /** * DISK = 1; */ public static final int DISK_VALUE = 1; /** * SSD = 2; */ public static final int SSD_VALUE = 2; /** * ARCHIVE = 3; */ public static final int ARCHIVE_VALUE = 3; /** * RAM_DISK = 4; */ public static final int RAM_DISK_VALUE = 4; /** * PROVIDED = 5; */ public static final int PROVIDED_VALUE = 5; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static StorageTypeProto valueOf(int value) { return forNumber(value); } public static StorageTypeProto forNumber(int value) { switch (value) { case 1: return DISK; case 2: return SSD; case 3: return ARCHIVE; case 4: return RAM_DISK; case 5: return PROVIDED; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< StorageTypeProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public StorageTypeProto findValueByNumber(int number) { return StorageTypeProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(0); } private static final StorageTypeProto[] 
VALUES = values(); public static StorageTypeProto valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private StorageTypeProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.StorageTypeProto) } /** *
   **
   * Types of recognized blocks.
   * 
* * Protobuf enum {@code hadoop.hdfs.BlockTypeProto} */ public enum BlockTypeProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * CONTIGUOUS = 0; */ CONTIGUOUS(0), /** * STRIPED = 1; */ STRIPED(1), ; /** * CONTIGUOUS = 0; */ public static final int CONTIGUOUS_VALUE = 0; /** * STRIPED = 1; */ public static final int STRIPED_VALUE = 1; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static BlockTypeProto valueOf(int value) { return forNumber(value); } public static BlockTypeProto forNumber(int value) { switch (value) { case 0: return CONTIGUOUS; case 1: return STRIPED; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< BlockTypeProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public BlockTypeProto findValueByNumber(int number) { return BlockTypeProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(1); } private static final BlockTypeProto[] VALUES = values(); public static BlockTypeProto valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private 
BlockTypeProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.BlockTypeProto) } /** *
   **
   * Cipher suite.
   * 
* * Protobuf enum {@code hadoop.hdfs.CipherSuiteProto} */ public enum CipherSuiteProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * UNKNOWN = 1; */ UNKNOWN(1), /** * AES_CTR_NOPADDING = 2; */ AES_CTR_NOPADDING(2), ; /** * UNKNOWN = 1; */ public static final int UNKNOWN_VALUE = 1; /** * AES_CTR_NOPADDING = 2; */ public static final int AES_CTR_NOPADDING_VALUE = 2; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static CipherSuiteProto valueOf(int value) { return forNumber(value); } public static CipherSuiteProto forNumber(int value) { switch (value) { case 1: return UNKNOWN; case 2: return AES_CTR_NOPADDING; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< CipherSuiteProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public CipherSuiteProto findValueByNumber(int number) { return CipherSuiteProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(2); } private static final CipherSuiteProto[] VALUES = values(); public static CipherSuiteProto valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return 
VALUES[desc.getIndex()]; } private final int value; private CipherSuiteProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.CipherSuiteProto) } /** *
   **
   * Crypto protocol version used to access encrypted files.
   * 
* * Protobuf enum {@code hadoop.hdfs.CryptoProtocolVersionProto} */ public enum CryptoProtocolVersionProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * UNKNOWN_PROTOCOL_VERSION = 1; */ UNKNOWN_PROTOCOL_VERSION(1), /** * ENCRYPTION_ZONES = 2; */ ENCRYPTION_ZONES(2), ; /** * UNKNOWN_PROTOCOL_VERSION = 1; */ public static final int UNKNOWN_PROTOCOL_VERSION_VALUE = 1; /** * ENCRYPTION_ZONES = 2; */ public static final int ENCRYPTION_ZONES_VALUE = 2; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static CryptoProtocolVersionProto valueOf(int value) { return forNumber(value); } public static CryptoProtocolVersionProto forNumber(int value) { switch (value) { case 1: return UNKNOWN_PROTOCOL_VERSION; case 2: return ENCRYPTION_ZONES; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< CryptoProtocolVersionProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public CryptoProtocolVersionProto findValueByNumber(int number) { return CryptoProtocolVersionProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(3); } private static final CryptoProtocolVersionProto[] VALUES = values(); public static CryptoProtocolVersionProto valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor 
desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private CryptoProtocolVersionProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.CryptoProtocolVersionProto) } /** *
   **
   * EC policy state.
   * 
* * Protobuf enum {@code hadoop.hdfs.ErasureCodingPolicyState} */ public enum ErasureCodingPolicyState implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * DISABLED = 1; */ DISABLED(1), /** * ENABLED = 2; */ ENABLED(2), /** * REMOVED = 3; */ REMOVED(3), ; /** * DISABLED = 1; */ public static final int DISABLED_VALUE = 1; /** * ENABLED = 2; */ public static final int ENABLED_VALUE = 2; /** * REMOVED = 3; */ public static final int REMOVED_VALUE = 3; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static ErasureCodingPolicyState valueOf(int value) { return forNumber(value); } public static ErasureCodingPolicyState forNumber(int value) { switch (value) { case 1: return DISABLED; case 2: return ENABLED; case 3: return REMOVED; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< ErasureCodingPolicyState> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public ErasureCodingPolicyState findValueByNumber(int number) { return ErasureCodingPolicyState.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(4); } private static final ErasureCodingPolicyState[] VALUES = values(); public static ErasureCodingPolicyState valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if 
(desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private ErasureCodingPolicyState(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.ErasureCodingPolicyState) } /** *
   **
   * Checksum algorithms/types used in HDFS
   * Make sure this enum's integer values match enum values' id properties defined
   * in org.apache.hadoop.util.DataChecksum.Type
   * 
* * Protobuf enum {@code hadoop.hdfs.ChecksumTypeProto} */ public enum ChecksumTypeProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * CHECKSUM_NULL = 0; */ CHECKSUM_NULL(0), /** * CHECKSUM_CRC32 = 1; */ CHECKSUM_CRC32(1), /** * CHECKSUM_CRC32C = 2; */ CHECKSUM_CRC32C(2), ; /** * CHECKSUM_NULL = 0; */ public static final int CHECKSUM_NULL_VALUE = 0; /** * CHECKSUM_CRC32 = 1; */ public static final int CHECKSUM_CRC32_VALUE = 1; /** * CHECKSUM_CRC32C = 2; */ public static final int CHECKSUM_CRC32C_VALUE = 2; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static ChecksumTypeProto valueOf(int value) { return forNumber(value); } public static ChecksumTypeProto forNumber(int value) { switch (value) { case 0: return CHECKSUM_NULL; case 1: return CHECKSUM_CRC32; case 2: return CHECKSUM_CRC32C; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< ChecksumTypeProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public ChecksumTypeProto findValueByNumber(int number) { return ChecksumTypeProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(5); } private static final ChecksumTypeProto[] VALUES = values(); public static ChecksumTypeProto valueOf( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private ChecksumTypeProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.ChecksumTypeProto) } /** * Protobuf enum {@code hadoop.hdfs.BlockChecksumTypeProto} */ public enum BlockChecksumTypeProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** *
     * BlockChecksum obtained by taking the MD5 digest of chunk CRCs
     * 
* * MD5CRC = 1; */ MD5CRC(1), /** *
     * Chunk-independent CRC, optionally striped
     * 
* * COMPOSITE_CRC = 2; */ COMPOSITE_CRC(2), ; /** *
     * BlockChecksum obtained by taking the MD5 digest of chunk CRCs
     * 
* * MD5CRC = 1; */ public static final int MD5CRC_VALUE = 1; /** *
     * Chunk-independent CRC, optionally striped
     * 
* * COMPOSITE_CRC = 2; */ public static final int COMPOSITE_CRC_VALUE = 2; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static BlockChecksumTypeProto valueOf(int value) { return forNumber(value); } public static BlockChecksumTypeProto forNumber(int value) { switch (value) { case 1: return MD5CRC; case 2: return COMPOSITE_CRC; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< BlockChecksumTypeProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public BlockChecksumTypeProto findValueByNumber(int number) { return BlockChecksumTypeProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(6); } private static final BlockChecksumTypeProto[] VALUES = values(); public static BlockChecksumTypeProto valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private BlockChecksumTypeProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.BlockChecksumTypeProto) } /** *
   **
   * File access permissions mode.
   * 
* * Protobuf enum {@code hadoop.hdfs.AccessModeProto} */ public enum AccessModeProto implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * READ = 1; */ READ(1), /** * WRITE = 2; */ WRITE(2), /** * COPY = 3; */ COPY(3), /** * REPLACE = 4; */ REPLACE(4), ; /** * READ = 1; */ public static final int READ_VALUE = 1; /** * WRITE = 2; */ public static final int WRITE_VALUE = 2; /** * COPY = 3; */ public static final int COPY_VALUE = 3; /** * REPLACE = 4; */ public static final int REPLACE_VALUE = 4; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static AccessModeProto valueOf(int value) { return forNumber(value); } public static AccessModeProto forNumber(int value) { switch (value) { case 1: return READ; case 2: return WRITE; case 3: return COPY; case 4: return REPLACE; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< AccessModeProto> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public AccessModeProto findValueByNumber(int number) { return AccessModeProto.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(7); } private static final AccessModeProto[] VALUES = values(); public static AccessModeProto valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if 
(desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private AccessModeProto(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.AccessModeProto) } public interface ExtendedBlockProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ExtendedBlockProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
     * Block pool id - globally unique across clusters
     * 
* * required string poolId = 1; */ boolean hasPoolId(); /** *
     * Block pool id - globally unique across clusters
     * 
* * required string poolId = 1; */ java.lang.String getPoolId(); /** *
     * Block pool id - globally unique across clusters
     * 
* * required string poolId = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getPoolIdBytes(); /** *
     * the local id within a pool
     * 
* * required uint64 blockId = 2; */ boolean hasBlockId(); /** *
     * the local id within a pool
     * 
* * required uint64 blockId = 2; */ long getBlockId(); /** * required uint64 generationStamp = 3; */ boolean hasGenerationStamp(); /** * required uint64 generationStamp = 3; */ long getGenerationStamp(); /** *
     * len does not belong in ebid 
     * 
* * optional uint64 numBytes = 4 [default = 0]; */ boolean hasNumBytes(); /** *
     * len does not belong in ebid 
     * 
* * optional uint64 numBytes = 4 [default = 0]; */ long getNumBytes(); } /** *
   **
   * Extended block idenfies a block
   * 
* * Protobuf type {@code hadoop.hdfs.ExtendedBlockProto} */ public static final class ExtendedBlockProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ExtendedBlockProto) ExtendedBlockProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ExtendedBlockProto.newBuilder() to construct. private ExtendedBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ExtendedBlockProto() { poolId_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ExtendedBlockProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; poolId_ = bs; break; } case 16: { bitField0_ |= 0x00000002; blockId_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; generationStamp_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; numBytes_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class); } private int bitField0_; public static final int POOLID_FIELD_NUMBER = 1; private volatile java.lang.Object poolId_; /** *
     * Block pool id - globally unique across clusters
     * 
* * required string poolId = 1; */ public boolean hasPoolId() { return ((bitField0_ & 0x00000001) != 0); } /** *
     * Block pool id - globally unique across clusters
     * 
* * required string poolId = 1; */ public java.lang.String getPoolId() { java.lang.Object ref = poolId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { poolId_ = s; } return s; } } /** *
     * Block pool id - globally unique across clusters
     * 
* * required string poolId = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPoolIdBytes() { java.lang.Object ref = poolId_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); poolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int BLOCKID_FIELD_NUMBER = 2; private long blockId_; /** *
     * the local id within a pool
     * 
* * required uint64 blockId = 2; */ public boolean hasBlockId() { return ((bitField0_ & 0x00000002) != 0); } /** *
     * the local id within a pool
     * 
* * required uint64 blockId = 2; */ public long getBlockId() { return blockId_; } public static final int GENERATIONSTAMP_FIELD_NUMBER = 3; private long generationStamp_; /** * required uint64 generationStamp = 3; */ public boolean hasGenerationStamp() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 generationStamp = 3; */ public long getGenerationStamp() { return generationStamp_; } public static final int NUMBYTES_FIELD_NUMBER = 4; private long numBytes_; /** *
     * len does not belong in ebid 
     * 
* * optional uint64 numBytes = 4 [default = 0]; */ public boolean hasNumBytes() { return ((bitField0_ & 0x00000008) != 0); } /** *
     * len does not belong in ebid 
     * 
* * optional uint64 numBytes = 4 [default = 0]; */ public long getNumBytes() { return numBytes_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasPoolId()) { memoizedIsInitialized = 0; return false; } if (!hasBlockId()) { memoizedIsInitialized = 0; return false; } if (!hasGenerationStamp()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, poolId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, blockId_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, generationStamp_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, numBytes_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, poolId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, blockId_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, generationStamp_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, numBytes_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) obj; if (hasPoolId() != other.hasPoolId()) return false; if (hasPoolId()) { if (!getPoolId() .equals(other.getPoolId())) return false; } if (hasBlockId() != other.hasBlockId()) return false; if (hasBlockId()) { if (getBlockId() != other.getBlockId()) return false; } if (hasGenerationStamp() != other.hasGenerationStamp()) return false; if (hasGenerationStamp()) { if (getGenerationStamp() != other.getGenerationStamp()) return false; } if (hasNumBytes() != other.hasNumBytes()) return false; if (hasNumBytes()) { if (getNumBytes() != other.getNumBytes()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } /* hashCode(): folds each present field's (field number, value) pair into a memoized hash; pairs with the generated equals() above. */ @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasPoolId()) { hash = (37 * hash) + POOLID_FIELD_NUMBER; hash = (53 * hash) + getPoolId().hashCode(); } if (hasBlockId()) { hash = (37 * hash) + BLOCKID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getBlockId()); } if (hasGenerationStamp()) { hash = (37 * hash) + GENERATIONSTAMP_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getGenerationStamp()); } if (hasNumBytes()) { hash = (37 * hash) + NUMBYTES_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNumBytes()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } /* All parseFrom/parseDelimitedFrom overloads below delegate to PARSER, via GeneratedMessageV3 stream helpers for the I/O variants. Generated code - do not hand-edit; regenerate from hdfs.proto. */ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /**
     **
      * Extended block identifies a block
      * 
* * Protobuf type {@code hadoop.hdfs.ExtendedBlockProto} */ /* NOTE(review): this copy appears HTML-mangled - generic type arguments (e.g. GeneratedMessageV3.Builder<Builder>) and javadoc <pre> tags look stripped by extraction. Restore from protoc output; do not hand-edit generated code. */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ExtendedBlockProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); poolId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); blockId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); generationStamp_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); numBytes_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor; } 
@java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.poolId_ = poolId_; if (((from_bitField0_ & 0x00000002) != 0)) { result.blockId_ = blockId_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.generationStamp_ = generationStamp_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.numBytes_ = numBytes_; to_bitField0_ |= 0x00000008; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) return this; if (other.hasPoolId()) { bitField0_ |= 0x00000001; poolId_ = other.poolId_; onChanged(); } if (other.hasBlockId()) { setBlockId(other.getBlockId()); } if (other.hasGenerationStamp()) { setGenerationStamp(other.getGenerationStamp()); } if (other.hasNumBytes()) { setNumBytes(other.getNumBytes()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasPoolId()) { return false; } if (!hasBlockId()) { return false; } if (!hasGenerationStamp()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = 
(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } /* Presence bits in bitField0_: 0x1=poolId, 0x2=blockId, 0x4=generationStamp, 0x8=numBytes (see clear() and buildPartial() above). */ private int bitField0_; private java.lang.Object poolId_ = ""; /**
       * Block pool id - globally unique across clusters
       * 
* * required string poolId = 1; */ public boolean hasPoolId() { return ((bitField0_ & 0x00000001) != 0); } /**
       * Block pool id - globally unique across clusters
       * 
* * required string poolId = 1; */ public java.lang.String getPoolId() { java.lang.Object ref = poolId_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { poolId_ = s; } return s; } else { return (java.lang.String) ref; } } /**
       * Block pool id - globally unique across clusters
       * 
* * required string poolId = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPoolIdBytes() { java.lang.Object ref = poolId_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); poolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /**
       * Block pool id - globally unique across clusters
       * 
* * required string poolId = 1; */ public Builder setPoolId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; poolId_ = value; onChanged(); return this; } /**
       * Block pool id - globally unique across clusters
       * 
* * required string poolId = 1; */ public Builder clearPoolId() { bitField0_ = (bitField0_ & ~0x00000001); poolId_ = getDefaultInstance().getPoolId(); onChanged(); return this; } /**
       * Block pool id - globally unique across clusters
       * 
* * required string poolId = 1; */ public Builder setPoolIdBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; poolId_ = value; onChanged(); return this; } private long blockId_ ; /**
       * the local id within a pool
       * 
* * required uint64 blockId = 2; */ public boolean hasBlockId() { return ((bitField0_ & 0x00000002) != 0); } /**
       * the local id within a pool
       * 
* * required uint64 blockId = 2; */ public long getBlockId() { return blockId_; } /**
       * the local id within a pool
       * 
* * required uint64 blockId = 2; */ public Builder setBlockId(long value) { bitField0_ |= 0x00000002; blockId_ = value; onChanged(); return this; } /**
       * the local id within a pool
       * 
* * required uint64 blockId = 2; */ public Builder clearBlockId() { bitField0_ = (bitField0_ & ~0x00000002); blockId_ = 0L; onChanged(); return this; } private long generationStamp_ ; /** * required uint64 generationStamp = 3; */ public boolean hasGenerationStamp() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 generationStamp = 3; */ public long getGenerationStamp() { return generationStamp_; } /** * required uint64 generationStamp = 3; */ public Builder setGenerationStamp(long value) { bitField0_ |= 0x00000004; generationStamp_ = value; onChanged(); return this; } /** * required uint64 generationStamp = 3; */ public Builder clearGenerationStamp() { bitField0_ = (bitField0_ & ~0x00000004); generationStamp_ = 0L; onChanged(); return this; } private long numBytes_ ; /**
       * len does not belong in ebid 
       * 
* * optional uint64 numBytes = 4 [default = 0]; */ public boolean hasNumBytes() { return ((bitField0_ & 0x00000008) != 0); } /**
       * len does not belong in ebid 
       * 
* * optional uint64 numBytes = 4 [default = 0]; */ public long getNumBytes() { return numBytes_; } /**
       * len does not belong in ebid 
       * 
* * optional uint64 numBytes = 4 [default = 0]; */ public Builder setNumBytes(long value) { bitField0_ |= 0x00000008; numBytes_ = value; onChanged(); return this; } /**
       * len does not belong in ebid 
       * 
* * optional uint64 numBytes = 4 [default = 0]; */ public Builder clearNumBytes() { bitField0_ = (bitField0_ & ~0x00000008); numBytes_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ExtendedBlockProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ExtendedBlockProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ExtendedBlockProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new ExtendedBlockProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ProvidedStorageLocationProtoOrBuilder extends // 
@@protoc_insertion_point(interface_extends:hadoop.hdfs.ProvidedStorageLocationProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string path = 1; */ boolean hasPath(); /** * required string path = 1; */ java.lang.String getPath(); /** * required string path = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes(); /** * required int64 offset = 2; */ boolean hasOffset(); /** * required int64 offset = 2; */ long getOffset(); /** * required int64 length = 3; */ boolean hasLength(); /** * required int64 length = 3; */ long getLength(); /** * required bytes nonce = 4; */ boolean hasNonce(); /** * required bytes nonce = 4; */ org.apache.hadoop.thirdparty.protobuf.ByteString getNonce(); } /** * Protobuf type {@code hadoop.hdfs.ProvidedStorageLocationProto} */ public static final class ProvidedStorageLocationProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ProvidedStorageLocationProto) ProvidedStorageLocationProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ProvidedStorageLocationProto.newBuilder() to construct. 
private ProvidedStorageLocationProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ProvidedStorageLocationProto() { path_ = ""; nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } /* Parsing constructor: reads tag/value pairs from the stream; fields 1-4 set presence bits, unrecognized tags are preserved in unknownFields. */ private ProvidedStorageLocationProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; path_ = bs; break; } case 16: { bitField0_ |= 0x00000002; offset_ = input.readInt64(); break; } case 24: { bitField0_ |= 0x00000004; length_ = input.readInt64(); break; } case 34: { bitField0_ |= 0x00000008; nonce_ = input.readBytes(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder.class); } /* Presence bits in bitField0_: 0x1=path, 0x2=offset, 0x4=length, 0x8=nonce. */ private int bitField0_; public static final int PATH_FIELD_NUMBER = 1; private volatile java.lang.Object path_; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * required string path = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int OFFSET_FIELD_NUMBER = 2; private long offset_; /** * required int64 offset = 2; */ public boolean hasOffset() { return ((bitField0_ & 0x00000002) != 0); } /** * required int64 offset = 2; */ public long getOffset() { return offset_; } public static final int LENGTH_FIELD_NUMBER = 3; private long length_; /** * required int64 length = 3; */ public 
boolean hasLength() { return ((bitField0_ & 0x00000004) != 0); } /** * required int64 length = 3; */ public long getLength() { return length_; } public static final int NONCE_FIELD_NUMBER = 4; private org.apache.hadoop.thirdparty.protobuf.ByteString nonce_; /** * required bytes nonce = 4; */ public boolean hasNonce() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes nonce = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getNonce() { return nonce_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasPath()) { memoizedIsInitialized = 0; return false; } if (!hasOffset()) { memoizedIsInitialized = 0; return false; } if (!hasLength()) { memoizedIsInitialized = 0; return false; } if (!hasNonce()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, path_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeInt64(2, offset_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeInt64(3, length_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBytes(4, nonce_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, path_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt64Size(2, offset_); } if (((bitField0_ & 0x00000004) != 0)) { size += 
org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt64Size(3, length_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(4, nonce_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto) obj; if (hasPath() != other.hasPath()) return false; if (hasPath()) { if (!getPath() .equals(other.getPath())) return false; } if (hasOffset() != other.hasOffset()) return false; if (hasOffset()) { if (getOffset() != other.getOffset()) return false; } if (hasLength() != other.hasLength()) return false; if (hasLength()) { if (getLength() != other.getLength()) return false; } if (hasNonce() != other.hasNonce()) return false; if (hasNonce()) { if (!getNonce() .equals(other.getNonce())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } if (hasOffset()) { hash = (37 * hash) + OFFSET_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getOffset()); } if (hasLength()) { hash = (37 * hash) + LENGTH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLength()); } if (hasNonce()) { hash = (37 * hash) + NONCE_FIELD_NUMBER; hash = (53 * hash) + getNonce().hashCode(); } hash = (29 * hash) + 
unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(java.io.InputStream input) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ProvidedStorageLocationProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ProvidedStorageLocationProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
.alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); path_ = ""; bitField0_ = (bitField0_ & ~0x00000001); offset_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); length_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000008); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.path_ = path_; if (((from_bitField0_ & 0x00000002) != 0)) { result.offset_ = offset_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.length_ = length_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { to_bitField0_ |= 0x00000008; } result.nonce_ = nonce_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } 
@java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance()) return this; if (other.hasPath()) { bitField0_ |= 0x00000001; path_ = other.path_; onChanged(); } if (other.hasOffset()) { setOffset(other.getOffset()); } if (other.hasLength()) { setLength(other.getLength()); } if (other.hasNonce()) { setNonce(other.getNonce()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasPath()) { return false; } if 
(!hasOffset()) { return false; } if (!hasLength()) { return false; } if (!hasNonce()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } /* Builder presence bits: 0x1=path, 0x2=offset, 0x4=length, 0x8=nonce. */ private int bitField0_; private java.lang.Object path_ = ""; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string path = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string path = 1; */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } /** 
required string path = 1; */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000001); path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** * required string path = 1; */ public Builder setPathBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } private long offset_ ; /** * required int64 offset = 2; */ public boolean hasOffset() { return ((bitField0_ & 0x00000002) != 0); } /** * required int64 offset = 2; */ public long getOffset() { return offset_; } /** * required int64 offset = 2; */ public Builder setOffset(long value) { bitField0_ |= 0x00000002; offset_ = value; onChanged(); return this; } /** * required int64 offset = 2; */ public Builder clearOffset() { bitField0_ = (bitField0_ & ~0x00000002); offset_ = 0L; onChanged(); return this; } private long length_ ; /** * required int64 length = 3; */ public boolean hasLength() { return ((bitField0_ & 0x00000004) != 0); } /** * required int64 length = 3; */ public long getLength() { return length_; } /** * required int64 length = 3; */ public Builder setLength(long value) { bitField0_ |= 0x00000004; length_ = value; onChanged(); return this; } /** * required int64 length = 3; */ public Builder clearLength() { bitField0_ = (bitField0_ & ~0x00000004); length_ = 0L; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes nonce = 4; */ public boolean hasNonce() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes nonce = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getNonce() { return nonce_; } /** * required bytes nonce = 4; */ public Builder setNonce(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; nonce_ = value; 
onChanged(); return this; } /** * required bytes nonce = 4; */ public Builder clearNonce() { bitField0_ = (bitField0_ & ~0x00000008); nonce_ = getDefaultInstance().getNonce(); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ProvidedStorageLocationProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ProvidedStorageLocationProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ProvidedStorageLocationProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new ProvidedStorageLocationProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } 
}

  // NOTE(review): generated protoc code; the HTML scrape stripped the <pre>/<code>
  // tags inside the javadoc below — they are restored here. Code tokens unchanged.
  public interface DatanodeIDProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeIDProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * IP address
     * </pre>
     *
     * <code>required string ipAddr = 1;</code>
     */
    boolean hasIpAddr();
    /**
     * <pre>
     * IP address
     * </pre>
     *
     * <code>required string ipAddr = 1;</code>
     */
    java.lang.String getIpAddr();
    /**
     * <pre>
     * IP address
     * </pre>
     *
     * <code>required string ipAddr = 1;</code>
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getIpAddrBytes();

    /**
     * <pre>
     * hostname
     * </pre>
     *
     * <code>required string hostName = 2;</code>
     */
    boolean hasHostName();
    /**
     * <pre>
     * hostname
     * </pre>
     *
     * <code>required string hostName = 2;</code>
     */
    java.lang.String getHostName();
    /**
     * <pre>
     * hostname
     * </pre>
     *
     * <code>required string hostName = 2;</code>
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getHostNameBytes();

    /**
     * <pre>
     * UUID assigned to the Datanode. For
     * </pre>
     *
     * <code>required string datanodeUuid = 3;</code>
     */
    boolean hasDatanodeUuid();
    /**
     * <pre>
     * UUID assigned to the Datanode. For
     * </pre>
     *
     * <code>required string datanodeUuid = 3;</code>
     */
    java.lang.String getDatanodeUuid();
    /**
     * <pre>
     * UUID assigned to the Datanode. For
     * </pre>
     *
     * <code>required string datanodeUuid = 3;</code>
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getDatanodeUuidBytes();

    /**
     * <pre>
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * </pre>
     *
     * <code>required uint32 xferPort = 4;</code>
     */
    boolean hasXferPort();
    /**
     * <pre>
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * </pre>
     *
     * <code>required uint32 xferPort = 4;</code>
     */
    int getXferPort();

    /**
     * <pre>
     * datanode http port
     * </pre>
     *
     * <code>required uint32 infoPort = 5;</code>
     */
    boolean hasInfoPort();
    /**
     * <pre>
     * datanode http port
     * </pre>
     *
     * <code>required uint32 infoPort = 5;</code>
     */
    int getInfoPort();

    /**
     * <pre>
     * ipc server port
     * </pre>
     *
     * <code>required uint32 ipcPort = 6;</code>
     */
    boolean hasIpcPort();
    /**
     * <pre>
     * ipc server port
     * </pre>
     *
     * <code>required uint32 ipcPort = 6;</code>
     */
    int getIpcPort();

    /**
     * <pre>
     * datanode https port
     * </pre>
     *
     * <code>optional uint32 infoSecurePort = 7 [default = 0];</code>
     */
    boolean hasInfoSecurePort();
    /**
     * <pre>
     * datanode https port
     * </pre>
     *
     * <code>optional uint32 infoSecurePort = 7 [default = 0];</code>
     */
    int getInfoSecurePort();
  }
  /**
   * <pre>
   **
   * Identifies a Datanode
   * </pre>
* * Protobuf type {@code hadoop.hdfs.DatanodeIDProto} */ public static final class DatanodeIDProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeIDProto) DatanodeIDProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DatanodeIDProto.newBuilder() to construct. private DatanodeIDProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DatanodeIDProto() { ipAddr_ = ""; hostName_ = ""; datanodeUuid_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DatanodeIDProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; ipAddr_ = bs; break; } case 18: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; hostName_ = bs; break; } case 26: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000004; datanodeUuid_ = bs; break; } case 32: { bitField0_ |= 0x00000008; xferPort_ = input.readUInt32(); break; } case 40: { bitField0_ |= 0x00000010; infoPort_ = input.readUInt32(); break; } case 48: { bitField0_ |= 0x00000020; ipcPort_ = input.readUInt32(); break; } case 56: { bitField0_ |= 
0x00000040; infoSecurePort_ = input.readUInt32(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class); } private int bitField0_; public static final int IPADDR_FIELD_NUMBER = 1; private volatile java.lang.Object ipAddr_; /** *
     * IP address
     * 
* * required string ipAddr = 1; */ public boolean hasIpAddr() { return ((bitField0_ & 0x00000001) != 0); } /** *
     * IP address
     * 
* * required string ipAddr = 1; */ public java.lang.String getIpAddr() { java.lang.Object ref = ipAddr_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ipAddr_ = s; } return s; } } /** *
     * IP address
     * 
* * required string ipAddr = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getIpAddrBytes() { java.lang.Object ref = ipAddr_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ipAddr_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int HOSTNAME_FIELD_NUMBER = 2; private volatile java.lang.Object hostName_; /** *
     * hostname
     * 
* * required string hostName = 2; */ public boolean hasHostName() { return ((bitField0_ & 0x00000002) != 0); } /** *
     * hostname
     * 
* * required string hostName = 2; */ public java.lang.String getHostName() { java.lang.Object ref = hostName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { hostName_ = s; } return s; } } /** *
     * hostname
     * 
* * required string hostName = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getHostNameBytes() { java.lang.Object ref = hostName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); hostName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int DATANODEUUID_FIELD_NUMBER = 3; private volatile java.lang.Object datanodeUuid_; /** *
     * UUID assigned to the Datanode. For
     * 
* * required string datanodeUuid = 3; */ public boolean hasDatanodeUuid() { return ((bitField0_ & 0x00000004) != 0); } /** *
     * UUID assigned to the Datanode. For
     * 
* * required string datanodeUuid = 3; */ public java.lang.String getDatanodeUuid() { java.lang.Object ref = datanodeUuid_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { datanodeUuid_ = s; } return s; } } /** *
     * UUID assigned to the Datanode. For
     * 
* * required string datanodeUuid = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getDatanodeUuidBytes() { java.lang.Object ref = datanodeUuid_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); datanodeUuid_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int XFERPORT_FIELD_NUMBER = 4; private int xferPort_; /** *
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * 
* * required uint32 xferPort = 4; */ public boolean hasXferPort() { return ((bitField0_ & 0x00000008) != 0); } /** *
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * 
* * required uint32 xferPort = 4; */ public int getXferPort() { return xferPort_; } public static final int INFOPORT_FIELD_NUMBER = 5; private int infoPort_; /** *
     * datanode http port
     * 
* * required uint32 infoPort = 5; */ public boolean hasInfoPort() { return ((bitField0_ & 0x00000010) != 0); } /** *
     * datanode http port
     * 
* * required uint32 infoPort = 5; */ public int getInfoPort() { return infoPort_; } public static final int IPCPORT_FIELD_NUMBER = 6; private int ipcPort_; /** *
     * ipc server port
     * 
* * required uint32 ipcPort = 6; */ public boolean hasIpcPort() { return ((bitField0_ & 0x00000020) != 0); } /** *
     * ipc server port
     * 
* * required uint32 ipcPort = 6; */ public int getIpcPort() { return ipcPort_; } public static final int INFOSECUREPORT_FIELD_NUMBER = 7; private int infoSecurePort_; /** *
     * datanode https port
     * 
* * optional uint32 infoSecurePort = 7 [default = 0]; */ public boolean hasInfoSecurePort() { return ((bitField0_ & 0x00000040) != 0); } /** *
     * datanode https port
     * 
* * optional uint32 infoSecurePort = 7 [default = 0]; */ public int getInfoSecurePort() { return infoSecurePort_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasIpAddr()) { memoizedIsInitialized = 0; return false; } if (!hasHostName()) { memoizedIsInitialized = 0; return false; } if (!hasDatanodeUuid()) { memoizedIsInitialized = 0; return false; } if (!hasXferPort()) { memoizedIsInitialized = 0; return false; } if (!hasInfoPort()) { memoizedIsInitialized = 0; return false; } if (!hasIpcPort()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, ipAddr_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, hostName_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, datanodeUuid_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt32(4, xferPort_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt32(5, infoPort_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt32(6, ipcPort_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeUInt32(7, infoSecurePort_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, ipAddr_); } if (((bitField0_ & 0x00000002) != 0)) { size += 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, hostName_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, datanodeUuid_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(4, xferPort_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(5, infoPort_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(6, ipcPort_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(7, infoSecurePort_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) obj; if (hasIpAddr() != other.hasIpAddr()) return false; if (hasIpAddr()) { if (!getIpAddr() .equals(other.getIpAddr())) return false; } if (hasHostName() != other.hasHostName()) return false; if (hasHostName()) { if (!getHostName() .equals(other.getHostName())) return false; } if (hasDatanodeUuid() != other.hasDatanodeUuid()) return false; if (hasDatanodeUuid()) { if (!getDatanodeUuid() .equals(other.getDatanodeUuid())) return false; } if (hasXferPort() != other.hasXferPort()) return false; if (hasXferPort()) { if (getXferPort() != other.getXferPort()) return false; } if (hasInfoPort() != other.hasInfoPort()) return false; if (hasInfoPort()) { if (getInfoPort() != other.getInfoPort()) return false; } if (hasIpcPort() != other.hasIpcPort()) 
return false; if (hasIpcPort()) { if (getIpcPort() != other.getIpcPort()) return false; } if (hasInfoSecurePort() != other.hasInfoSecurePort()) return false; if (hasInfoSecurePort()) { if (getInfoSecurePort() != other.getInfoSecurePort()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasIpAddr()) { hash = (37 * hash) + IPADDR_FIELD_NUMBER; hash = (53 * hash) + getIpAddr().hashCode(); } if (hasHostName()) { hash = (37 * hash) + HOSTNAME_FIELD_NUMBER; hash = (53 * hash) + getHostName().hashCode(); } if (hasDatanodeUuid()) { hash = (37 * hash) + DATANODEUUID_FIELD_NUMBER; hash = (53 * hash) + getDatanodeUuid().hashCode(); } if (hasXferPort()) { hash = (37 * hash) + XFERPORT_FIELD_NUMBER; hash = (53 * hash) + getXferPort(); } if (hasInfoPort()) { hash = (37 * hash) + INFOPORT_FIELD_NUMBER; hash = (53 * hash) + getInfoPort(); } if (hasIpcPort()) { hash = (37 * hash) + IPCPORT_FIELD_NUMBER; hash = (53 * hash) + getIpcPort(); } if (hasInfoSecurePort()) { hash = (37 * hash) + INFOSECUREPORT_FIELD_NUMBER; hash = (53 * hash) + getInfoSecurePort(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto 
parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Identifies a Datanode
     * 
* * Protobuf type {@code hadoop.hdfs.DatanodeIDProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeIDProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); ipAddr_ = ""; bitField0_ = (bitField0_ & ~0x00000001); hostName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); datanodeUuid_ = ""; bitField0_ = (bitField0_ & ~0x00000004); xferPort_ = 0; bitField0_ = (bitField0_ & ~0x00000008); infoPort_ = 0; bitField0_ = (bitField0_ & ~0x00000010); ipcPort_ = 0; bitField0_ = (bitField0_ & ~0x00000020); infoSecurePort_ = 0; bitField0_ = (bitField0_ & ~0x00000040); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor 
getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.ipAddr_ = ipAddr_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.hostName_ = hostName_; if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.datanodeUuid_ = datanodeUuid_; if (((from_bitField0_ & 0x00000008) != 0)) { result.xferPort_ = xferPort_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.infoPort_ = infoPort_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.ipcPort_ = ipcPort_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { result.infoSecurePort_ = infoSecurePort_; to_bitField0_ |= 0x00000040; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { 
return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) return this; if (other.hasIpAddr()) { bitField0_ |= 0x00000001; ipAddr_ = other.ipAddr_; onChanged(); } if (other.hasHostName()) { bitField0_ |= 0x00000002; hostName_ = other.hostName_; onChanged(); } if (other.hasDatanodeUuid()) { bitField0_ |= 0x00000004; datanodeUuid_ = other.datanodeUuid_; onChanged(); } if (other.hasXferPort()) { setXferPort(other.getXferPort()); } if (other.hasInfoPort()) { setInfoPort(other.getInfoPort()); } if (other.hasIpcPort()) { setIpcPort(other.getIpcPort()); } if (other.hasInfoSecurePort()) { setInfoSecurePort(other.getInfoSecurePort()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean 
isInitialized() { if (!hasIpAddr()) { return false; } if (!hasHostName()) { return false; } if (!hasDatanodeUuid()) { return false; } if (!hasXferPort()) { return false; } if (!hasInfoPort()) { return false; } if (!hasIpcPort()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object ipAddr_ = ""; /** *
       * IP address
       * 
* * required string ipAddr = 1; */ public boolean hasIpAddr() { return ((bitField0_ & 0x00000001) != 0); } /** *
       * IP address
       * 
* * required string ipAddr = 1; */ public java.lang.String getIpAddr() { java.lang.Object ref = ipAddr_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ipAddr_ = s; } return s; } else { return (java.lang.String) ref; } } /** *
       * IP address
       * 
* * required string ipAddr = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getIpAddrBytes() { java.lang.Object ref = ipAddr_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ipAddr_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** *
       * IP address
       * 
* * required string ipAddr = 1; */ public Builder setIpAddr( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; ipAddr_ = value; onChanged(); return this; } /** *
       * IP address
       * 
* * required string ipAddr = 1; */ public Builder clearIpAddr() { bitField0_ = (bitField0_ & ~0x00000001); ipAddr_ = getDefaultInstance().getIpAddr(); onChanged(); return this; } /** *
       * IP address
       * 
* * required string ipAddr = 1; */ public Builder setIpAddrBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; ipAddr_ = value; onChanged(); return this; } private java.lang.Object hostName_ = ""; /** *
       * hostname
       * 
* * required string hostName = 2; */ public boolean hasHostName() { return ((bitField0_ & 0x00000002) != 0); } /** *
       * hostname
       * 
* * required string hostName = 2; */ public java.lang.String getHostName() { java.lang.Object ref = hostName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { hostName_ = s; } return s; } else { return (java.lang.String) ref; } } /** *
       * hostname
       * 
* * required string hostName = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getHostNameBytes() { java.lang.Object ref = hostName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); hostName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** *
       * hostname
       * 
* * required string hostName = 2; */ public Builder setHostName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; hostName_ = value; onChanged(); return this; } /** *
       * hostname
       * 
* * required string hostName = 2; */ public Builder clearHostName() { bitField0_ = (bitField0_ & ~0x00000002); hostName_ = getDefaultInstance().getHostName(); onChanged(); return this; } /** *
       * hostname
       * 
* * required string hostName = 2; */ public Builder setHostNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; hostName_ = value; onChanged(); return this; } private java.lang.Object datanodeUuid_ = ""; /** *
       * UUID assigned to the Datanode. For
       * 
* * required string datanodeUuid = 3; */ public boolean hasDatanodeUuid() { return ((bitField0_ & 0x00000004) != 0); } /** *
       * UUID assigned to the Datanode. For
       * 
* * required string datanodeUuid = 3; */ public java.lang.String getDatanodeUuid() { java.lang.Object ref = datanodeUuid_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { datanodeUuid_ = s; } return s; } else { return (java.lang.String) ref; } } /** *
       * UUID assigned to the Datanode. For
       * 
* * required string datanodeUuid = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getDatanodeUuidBytes() { java.lang.Object ref = datanodeUuid_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); datanodeUuid_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** *
       * UUID assigned to the Datanode. For
       * 
* * required string datanodeUuid = 3; */ public Builder setDatanodeUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; datanodeUuid_ = value; onChanged(); return this; } /** *
       * UUID assigned to the Datanode. For
       * 
* * required string datanodeUuid = 3; */ public Builder clearDatanodeUuid() { bitField0_ = (bitField0_ & ~0x00000004); datanodeUuid_ = getDefaultInstance().getDatanodeUuid(); onChanged(); return this; } /** *
       * UUID assigned to the Datanode. For
       * 
* * required string datanodeUuid = 3; */ public Builder setDatanodeUuidBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; datanodeUuid_ = value; onChanged(); return this; } private int xferPort_ ; /** *
       * upgraded clusters this is the same
       * as the original StorageID of the
       * Datanode.
       * 
* * required uint32 xferPort = 4; */ public boolean hasXferPort() { return ((bitField0_ & 0x00000008) != 0); } /** *
       * upgraded clusters this is the same
       * as the original StorageID of the
       * Datanode.
       * 
* * required uint32 xferPort = 4; */ public int getXferPort() { return xferPort_; } /** *
       * upgraded clusters this is the same
       * as the original StorageID of the
       * Datanode.
       * 
* * required uint32 xferPort = 4; */ public Builder setXferPort(int value) { bitField0_ |= 0x00000008; xferPort_ = value; onChanged(); return this; } /** *
       * upgraded clusters this is the same
       * as the original StorageID of the
       * Datanode.
       * 
* * required uint32 xferPort = 4; */ public Builder clearXferPort() { bitField0_ = (bitField0_ & ~0x00000008); xferPort_ = 0; onChanged(); return this; } private int infoPort_ ; /** *
       * datanode http port
       * 
* * required uint32 infoPort = 5; */ public boolean hasInfoPort() { return ((bitField0_ & 0x00000010) != 0); } /** *
       * datanode http port
       * 
* * required uint32 infoPort = 5; */ public int getInfoPort() { return infoPort_; } /** *
       * datanode http port
       * 
* * required uint32 infoPort = 5; */ public Builder setInfoPort(int value) { bitField0_ |= 0x00000010; infoPort_ = value; onChanged(); return this; } /** *
       * datanode http port
       * 
* * required uint32 infoPort = 5; */ public Builder clearInfoPort() { bitField0_ = (bitField0_ & ~0x00000010); infoPort_ = 0; onChanged(); return this; } private int ipcPort_ ; /** *
       * ipc server port
       * 
* * required uint32 ipcPort = 6; */ public boolean hasIpcPort() { return ((bitField0_ & 0x00000020) != 0); } /** *
       * ipc server port
       * 
* * required uint32 ipcPort = 6; */ public int getIpcPort() { return ipcPort_; } /** *
       * ipc server port
       * 
* * required uint32 ipcPort = 6; */ public Builder setIpcPort(int value) { bitField0_ |= 0x00000020; ipcPort_ = value; onChanged(); return this; } /** *
       * ipc server port
       * 
* * required uint32 ipcPort = 6; */ public Builder clearIpcPort() { bitField0_ = (bitField0_ & ~0x00000020); ipcPort_ = 0; onChanged(); return this; } private int infoSecurePort_ ; /** *
       * datanode https port
       * 
* * optional uint32 infoSecurePort = 7 [default = 0]; */ public boolean hasInfoSecurePort() { return ((bitField0_ & 0x00000040) != 0); } /** *
       * datanode https port
       * 
* * optional uint32 infoSecurePort = 7 [default = 0]; */ public int getInfoSecurePort() { return infoSecurePort_; } /** *
       * datanode https port
       * 
* * optional uint32 infoSecurePort = 7 [default = 0]; */ public Builder setInfoSecurePort(int value) { bitField0_ |= 0x00000040; infoSecurePort_ = value; onChanged(); return this; } /** *
       * datanode https port
       * 
* * optional uint32 infoSecurePort = 7 [default = 0]; */ public Builder clearInfoSecurePort() { bitField0_ = (bitField0_ & ~0x00000040); infoSecurePort_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeIDProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeIDProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DatanodeIDProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new DatanodeIDProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DatanodeLocalInfoProtoOrBuilder extends // 
@@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeLocalInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string softwareVersion = 1; */ boolean hasSoftwareVersion(); /** * required string softwareVersion = 1; */ java.lang.String getSoftwareVersion(); /** * required string softwareVersion = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getSoftwareVersionBytes(); /** * required string configVersion = 2; */ boolean hasConfigVersion(); /** * required string configVersion = 2; */ java.lang.String getConfigVersion(); /** * required string configVersion = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getConfigVersionBytes(); /** * required uint64 uptime = 3; */ boolean hasUptime(); /** * required uint64 uptime = 3; */ long getUptime(); } /** *
   **
   * Datanode local information
   * 
* * Protobuf type {@code hadoop.hdfs.DatanodeLocalInfoProto} */ public static final class DatanodeLocalInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeLocalInfoProto) DatanodeLocalInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DatanodeLocalInfoProto.newBuilder() to construct. private DatanodeLocalInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DatanodeLocalInfoProto() { softwareVersion_ = ""; configVersion_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DatanodeLocalInfoProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; softwareVersion_ = bs; break; } case 18: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; configVersion_ = bs; break; } case 24: { bitField0_ |= 0x00000004; uptime_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException 
e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder.class); } private int bitField0_; public static final int SOFTWAREVERSION_FIELD_NUMBER = 1; private volatile java.lang.Object softwareVersion_; /** * required string softwareVersion = 1; */ public boolean hasSoftwareVersion() { return ((bitField0_ & 0x00000001) != 0); } /** * required string softwareVersion = 1; */ public java.lang.String getSoftwareVersion() { java.lang.Object ref = softwareVersion_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { softwareVersion_ = s; } return s; } } /** * required string softwareVersion = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSoftwareVersionBytes() { java.lang.Object ref = softwareVersion_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); softwareVersion_ = b; return b; } else { return 
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int CONFIGVERSION_FIELD_NUMBER = 2; private volatile java.lang.Object configVersion_; /** * required string configVersion = 2; */ public boolean hasConfigVersion() { return ((bitField0_ & 0x00000002) != 0); } /** * required string configVersion = 2; */ public java.lang.String getConfigVersion() { java.lang.Object ref = configVersion_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { configVersion_ = s; } return s; } } /** * required string configVersion = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getConfigVersionBytes() { java.lang.Object ref = configVersion_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); configVersion_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int UPTIME_FIELD_NUMBER = 3; private long uptime_; /** * required uint64 uptime = 3; */ public boolean hasUptime() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 uptime = 3; */ public long getUptime() { return uptime_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSoftwareVersion()) { memoizedIsInitialized = 0; return false; } if (!hasConfigVersion()) { memoizedIsInitialized = 0; return false; } if (!hasUptime()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws 
java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, softwareVersion_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, configVersion_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, uptime_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, softwareVersion_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, configVersion_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, uptime_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) obj; if (hasSoftwareVersion() != other.hasSoftwareVersion()) return false; if (hasSoftwareVersion()) { if (!getSoftwareVersion() .equals(other.getSoftwareVersion())) return false; } if (hasConfigVersion() != other.hasConfigVersion()) return false; if (hasConfigVersion()) { if (!getConfigVersion() .equals(other.getConfigVersion())) return false; } if (hasUptime() != other.hasUptime()) return false; if (hasUptime()) { if (getUptime() != other.getUptime()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public 
int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSoftwareVersion()) { hash = (37 * hash) + SOFTWAREVERSION_FIELD_NUMBER; hash = (53 * hash) + getSoftwareVersion().hashCode(); } if (hasConfigVersion()) { hash = (37 * hash) + CONFIGVERSION_FIELD_NUMBER; hash = (53 * hash) + getConfigVersion().hashCode(); } if (hasUptime()) { hash = (37 * hash) + UPTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getUptime()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(byte[] data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Datanode local information
     * 
* * Protobuf type {@code hadoop.hdfs.DatanodeLocalInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeLocalInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); softwareVersion_ = ""; bitField0_ = (bitField0_ & ~0x00000001); configVersion_ = ""; bitField0_ = (bitField0_ & ~0x00000002); uptime_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor; } @java.lang.Override 
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.softwareVersion_ = softwareVersion_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.configVersion_ = configVersion_; if (((from_bitField0_ & 0x00000004) != 0)) { result.uptime_ = uptime_; to_bitField0_ |= 0x00000004; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return 
super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance()) return this; if (other.hasSoftwareVersion()) { bitField0_ |= 0x00000001; softwareVersion_ = other.softwareVersion_; onChanged(); } if (other.hasConfigVersion()) { bitField0_ |= 0x00000002; configVersion_ = other.configVersion_; onChanged(); } if (other.hasUptime()) { setUptime(other.getUptime()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasSoftwareVersion()) { return false; } if (!hasConfigVersion()) { return false; } if (!hasUptime()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { 
if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object softwareVersion_ = ""; /** * required string softwareVersion = 1; */ public boolean hasSoftwareVersion() { return ((bitField0_ & 0x00000001) != 0); } /** * required string softwareVersion = 1; */ public java.lang.String getSoftwareVersion() { java.lang.Object ref = softwareVersion_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { softwareVersion_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string softwareVersion = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSoftwareVersionBytes() { java.lang.Object ref = softwareVersion_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); softwareVersion_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string softwareVersion = 1; */ public Builder setSoftwareVersion( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; softwareVersion_ = value; onChanged(); return this; } /** * required string softwareVersion = 1; */ public Builder clearSoftwareVersion() { bitField0_ = (bitField0_ & ~0x00000001); softwareVersion_ = getDefaultInstance().getSoftwareVersion(); onChanged(); return this; } /** * required string softwareVersion = 1; */ public Builder setSoftwareVersionBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; softwareVersion_ = value; onChanged(); return this; } private java.lang.Object configVersion_ = ""; /** * required string configVersion = 2; */ public boolean 
hasConfigVersion() { return ((bitField0_ & 0x00000002) != 0); } /** * required string configVersion = 2; */ public java.lang.String getConfigVersion() { java.lang.Object ref = configVersion_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { configVersion_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string configVersion = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getConfigVersionBytes() { java.lang.Object ref = configVersion_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); configVersion_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string configVersion = 2; */ public Builder setConfigVersion( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; configVersion_ = value; onChanged(); return this; } /** * required string configVersion = 2; */ public Builder clearConfigVersion() { bitField0_ = (bitField0_ & ~0x00000002); configVersion_ = getDefaultInstance().getConfigVersion(); onChanged(); return this; } /** * required string configVersion = 2; */ public Builder setConfigVersionBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; configVersion_ = value; onChanged(); return this; } private long uptime_ ; /** * required uint64 uptime = 3; */ public boolean hasUptime() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 uptime = 3; */ public long getUptime() { return uptime_; } /** * required uint64 uptime = 3; */ public Builder setUptime(long value) { bitField0_ |= 0x00000004; uptime_ = value; onChanged(); return 
this; } /** * required uint64 uptime = 3; */ public Builder clearUptime() { bitField0_ = (bitField0_ & ~0x00000004); uptime_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeLocalInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeLocalInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DatanodeLocalInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new DatanodeLocalInfoProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DatanodeVolumeInfoProtoOrBuilder extends // 
@@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeVolumeInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string path = 1; */ boolean hasPath(); /** * required string path = 1; */ java.lang.String getPath(); /** * required string path = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes(); /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; */ boolean hasStorageType(); /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType(); /** * required uint64 usedSpace = 3; */ boolean hasUsedSpace(); /** * required uint64 usedSpace = 3; */ long getUsedSpace(); /** * required uint64 freeSpace = 4; */ boolean hasFreeSpace(); /** * required uint64 freeSpace = 4; */ long getFreeSpace(); /** * required uint64 reservedSpace = 5; */ boolean hasReservedSpace(); /** * required uint64 reservedSpace = 5; */ long getReservedSpace(); /** * required uint64 reservedSpaceForReplicas = 6; */ boolean hasReservedSpaceForReplicas(); /** * required uint64 reservedSpaceForReplicas = 6; */ long getReservedSpaceForReplicas(); /** * required uint64 numBlocks = 7; */ boolean hasNumBlocks(); /** * required uint64 numBlocks = 7; */ long getNumBlocks(); } /** *
   **
   * Datanode volume information
   * 
* * Protobuf type {@code hadoop.hdfs.DatanodeVolumeInfoProto} */ public static final class DatanodeVolumeInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeVolumeInfoProto) DatanodeVolumeInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DatanodeVolumeInfoProto.newBuilder() to construct. private DatanodeVolumeInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DatanodeVolumeInfoProto() { path_ = ""; storageType_ = 1; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DatanodeVolumeInfoProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; path_ = bs; break; } case 16: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(2, rawValue); } else { bitField0_ |= 0x00000002; storageType_ = rawValue; } break; } case 24: { bitField0_ |= 0x00000004; usedSpace_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; 
freeSpace_ = input.readUInt64(); break; } case 40: { bitField0_ |= 0x00000010; reservedSpace_ = input.readUInt64(); break; } case 48: { bitField0_ |= 0x00000020; reservedSpaceForReplicas_ = input.readUInt64(); break; } case 56: { bitField0_ |= 0x00000040; numBlocks_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder.class); } private int bitField0_; public static final int PATH_FIELD_NUMBER = 1; private volatile java.lang.Object path_; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = 
bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * required string path = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int STORAGETYPE_FIELD_NUMBER = 2; private int storageType_; /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; */ public boolean hasStorageType() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(storageType_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } public static final int USEDSPACE_FIELD_NUMBER = 3; private long usedSpace_; /** * required uint64 usedSpace = 3; */ public boolean hasUsedSpace() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 usedSpace = 3; */ public long getUsedSpace() { return usedSpace_; } public static final int FREESPACE_FIELD_NUMBER = 4; private long freeSpace_; /** * required uint64 freeSpace = 4; */ public boolean hasFreeSpace() { return ((bitField0_ & 0x00000008) != 0); } /** * required uint64 freeSpace = 4; */ public long getFreeSpace() { return freeSpace_; } public static final int RESERVEDSPACE_FIELD_NUMBER = 5; private long reservedSpace_; /** * required uint64 reservedSpace = 5; */ public boolean hasReservedSpace() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint64 reservedSpace = 5; */ public long getReservedSpace() { return reservedSpace_; } public static final int RESERVEDSPACEFORREPLICAS_FIELD_NUMBER = 6; private long reservedSpaceForReplicas_; /** * required uint64 reservedSpaceForReplicas = 6; */ public boolean hasReservedSpaceForReplicas() { return ((bitField0_ & 0x00000020) != 0); } /** * required uint64 reservedSpaceForReplicas = 6; */ public long getReservedSpaceForReplicas() { return reservedSpaceForReplicas_; } public static final int NUMBLOCKS_FIELD_NUMBER = 7; private long numBlocks_; /** * required uint64 numBlocks = 7; */ public boolean hasNumBlocks() { return ((bitField0_ & 0x00000040) != 0); } /** * required uint64 numBlocks = 7; */ public long getNumBlocks() { return numBlocks_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasPath()) { memoizedIsInitialized = 0; return false; } if (!hasStorageType()) { memoizedIsInitialized = 0; return false; } if (!hasUsedSpace()) { 
memoizedIsInitialized = 0; return false; } if (!hasFreeSpace()) { memoizedIsInitialized = 0; return false; } if (!hasReservedSpace()) { memoizedIsInitialized = 0; return false; } if (!hasReservedSpaceForReplicas()) { memoizedIsInitialized = 0; return false; } if (!hasNumBlocks()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, path_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeEnum(2, storageType_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, usedSpace_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, freeSpace_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, reservedSpace_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt64(6, reservedSpaceForReplicas_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeUInt64(7, numBlocks_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, path_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(2, storageType_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, usedSpace_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, freeSpace_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, reservedSpace_); } if (((bitField0_ & 0x00000020) != 
0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(6, reservedSpaceForReplicas_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(7, numBlocks_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto) obj; if (hasPath() != other.hasPath()) return false; if (hasPath()) { if (!getPath() .equals(other.getPath())) return false; } if (hasStorageType() != other.hasStorageType()) return false; if (hasStorageType()) { if (storageType_ != other.storageType_) return false; } if (hasUsedSpace() != other.hasUsedSpace()) return false; if (hasUsedSpace()) { if (getUsedSpace() != other.getUsedSpace()) return false; } if (hasFreeSpace() != other.hasFreeSpace()) return false; if (hasFreeSpace()) { if (getFreeSpace() != other.getFreeSpace()) return false; } if (hasReservedSpace() != other.hasReservedSpace()) return false; if (hasReservedSpace()) { if (getReservedSpace() != other.getReservedSpace()) return false; } if (hasReservedSpaceForReplicas() != other.hasReservedSpaceForReplicas()) return false; if (hasReservedSpaceForReplicas()) { if (getReservedSpaceForReplicas() != other.getReservedSpaceForReplicas()) return false; } if (hasNumBlocks() != other.hasNumBlocks()) return false; if (hasNumBlocks()) { if (getNumBlocks() != other.getNumBlocks()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * 
hash) + getDescriptor().hashCode(); if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } if (hasStorageType()) { hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER; hash = (53 * hash) + storageType_; } if (hasUsedSpace()) { hash = (37 * hash) + USEDSPACE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getUsedSpace()); } if (hasFreeSpace()) { hash = (37 * hash) + FREESPACE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFreeSpace()); } if (hasReservedSpace()) { hash = (37 * hash) + RESERVEDSPACE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getReservedSpace()); } if (hasReservedSpaceForReplicas()) { hash = (37 * hash) + RESERVEDSPACEFORREPLICAS_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getReservedSpaceForReplicas()); } if (hasNumBlocks()) { hash = (37 * hash) + NUMBLOCKS_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNumBlocks()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto 
parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Datanode volume information
     * 
* * Protobuf type {@code hadoop.hdfs.DatanodeVolumeInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeVolumeInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); path_ = ""; bitField0_ = (bitField0_ & ~0x00000001); storageType_ = 1; bitField0_ = (bitField0_ & ~0x00000002); usedSpace_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); freeSpace_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); reservedSpace_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); reservedSpaceForReplicas_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); numBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000040); return this; } 
@java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.path_ = path_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.storageType_ = storageType_; if (((from_bitField0_ & 0x00000004) != 0)) { result.usedSpace_ = usedSpace_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.freeSpace_ = freeSpace_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.reservedSpace_ = reservedSpace_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.reservedSpaceForReplicas_ = reservedSpaceForReplicas_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { result.numBlocks_ = numBlocks_; to_bitField0_ |= 0x00000040; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public 
Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.getDefaultInstance()) return this; if (other.hasPath()) { bitField0_ |= 0x00000001; path_ = other.path_; onChanged(); } if (other.hasStorageType()) { setStorageType(other.getStorageType()); } if (other.hasUsedSpace()) { setUsedSpace(other.getUsedSpace()); } if (other.hasFreeSpace()) { setFreeSpace(other.getFreeSpace()); } if (other.hasReservedSpace()) { setReservedSpace(other.getReservedSpace()); } if (other.hasReservedSpaceForReplicas()) { 
setReservedSpaceForReplicas(other.getReservedSpaceForReplicas()); } if (other.hasNumBlocks()) { setNumBlocks(other.getNumBlocks()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasPath()) { return false; } if (!hasStorageType()) { return false; } if (!hasUsedSpace()) { return false; } if (!hasFreeSpace()) { return false; } if (!hasReservedSpace()) { return false; } if (!hasReservedSpaceForReplicas()) { return false; } if (!hasNumBlocks()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object path_ = ""; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string path = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { 
org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string path = 1; */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } /** * required string path = 1; */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000001); path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** * required string path = 1; */ public Builder setPathBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } private int storageType_ = 1; /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; */ public boolean hasStorageType() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(storageType_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; */ public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; storageType_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.StorageTypeProto storageType = 2; */ public Builder clearStorageType() { bitField0_ = (bitField0_ & ~0x00000002); storageType_ = 1; onChanged(); return this; } private long usedSpace_ ; /** * required uint64 usedSpace = 3; */ public boolean hasUsedSpace() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 usedSpace = 3; */ public long getUsedSpace() { return usedSpace_; } /** * required uint64 usedSpace = 3; */ public Builder setUsedSpace(long value) { bitField0_ |= 0x00000004; usedSpace_ = value; onChanged(); return this; } /** * required uint64 usedSpace = 3; */ public Builder clearUsedSpace() { bitField0_ = (bitField0_ & ~0x00000004); usedSpace_ = 0L; onChanged(); return this; } private long freeSpace_ ; /** * required uint64 freeSpace = 4; */ public boolean hasFreeSpace() { return ((bitField0_ & 0x00000008) != 0); } /** * required uint64 freeSpace = 4; */ public long getFreeSpace() { return freeSpace_; } /** * required uint64 freeSpace = 4; */ public Builder setFreeSpace(long value) { bitField0_ |= 0x00000008; freeSpace_ = value; onChanged(); return this; } /** * required uint64 freeSpace = 4; */ public Builder clearFreeSpace() { bitField0_ = (bitField0_ & ~0x00000008); freeSpace_ = 0L; onChanged(); return this; } private long reservedSpace_ ; /** * required uint64 reservedSpace = 5; */ public boolean hasReservedSpace() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint64 reservedSpace = 5; */ public long getReservedSpace() { return reservedSpace_; } /** * required uint64 reservedSpace = 5; */ public Builder 
setReservedSpace(long value) { bitField0_ |= 0x00000010; reservedSpace_ = value; onChanged(); return this; } /** * required uint64 reservedSpace = 5; */ public Builder clearReservedSpace() { bitField0_ = (bitField0_ & ~0x00000010); reservedSpace_ = 0L; onChanged(); return this; } private long reservedSpaceForReplicas_ ; /** * required uint64 reservedSpaceForReplicas = 6; */ public boolean hasReservedSpaceForReplicas() { return ((bitField0_ & 0x00000020) != 0); } /** * required uint64 reservedSpaceForReplicas = 6; */ public long getReservedSpaceForReplicas() { return reservedSpaceForReplicas_; } /** * required uint64 reservedSpaceForReplicas = 6; */ public Builder setReservedSpaceForReplicas(long value) { bitField0_ |= 0x00000020; reservedSpaceForReplicas_ = value; onChanged(); return this; } /** * required uint64 reservedSpaceForReplicas = 6; */ public Builder clearReservedSpaceForReplicas() { bitField0_ = (bitField0_ & ~0x00000020); reservedSpaceForReplicas_ = 0L; onChanged(); return this; } private long numBlocks_ ; /** * required uint64 numBlocks = 7; */ public boolean hasNumBlocks() { return ((bitField0_ & 0x00000040) != 0); } /** * required uint64 numBlocks = 7; */ public long getNumBlocks() { return numBlocks_; } /** * required uint64 numBlocks = 7; */ public Builder setNumBlocks(long value) { bitField0_ |= 0x00000040; numBlocks_ = value; onChanged(); return this; } /** * required uint64 numBlocks = 7; */ public Builder clearNumBlocks() { bitField0_ = (bitField0_ & ~0x00000040); numBlocks_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // 
@@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeVolumeInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeVolumeInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DatanodeVolumeInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new DatanodeVolumeInfoProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DatanodeInfosProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeInfosProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ java.util.List getDatanodesList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index); /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ int getDatanodesCount(); /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ java.util.List 
getDatanodesOrBuilderList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder( int index); } /** *
   **
   * DatanodeInfo array
   * 
* * Protobuf type {@code hadoop.hdfs.DatanodeInfosProto} */ public static final class DatanodeInfosProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeInfosProto) DatanodeInfosProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DatanodeInfosProto.newBuilder() to construct. private DatanodeInfosProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DatanodeInfosProto() { datanodes_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DatanodeInfosProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { datanodes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } datanodes_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry)); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( 
e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { datanodes_ = java.util.Collections.unmodifiableList(datanodes_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder.class); } public static final int DATANODES_FIELD_NUMBER = 1; private java.util.List datanodes_; /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public java.util.List getDatanodesList() { return datanodes_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public java.util.List getDatanodesOrBuilderList() { return datanodes_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public int getDatanodesCount() { return datanodes_.size(); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) { return datanodes_.get(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder( int index) { return datanodes_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) 
return false; for (int i = 0; i < getDatanodesCount(); i++) { if (!getDatanodes(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < datanodes_.size(); i++) { output.writeMessage(1, datanodes_.get(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < datanodes_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, datanodes_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) obj; if (!getDatanodesList() .equals(other.getDatanodesList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getDatanodesCount() > 0) { hash = (37 * hash) + DATANODES_FIELD_NUMBER; hash = (53 * hash) + getDatanodesList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * DatanodeInfo array
     * 
* * Protobuf type {@code hadoop.hdfs.DatanodeInfosProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeInfosProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getDatanodesFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (datanodesBuilder_ == null) { datanodes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { datanodesBuilder_.clear(); } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor; } @java.lang.Override public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto(this); int from_bitField0_ = bitField0_; if (datanodesBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { datanodes_ = java.util.Collections.unmodifiableList(datanodes_); bitField0_ = (bitField0_ & ~0x00000001); } result.datanodes_ = datanodes_; } else { result.datanodes_ = datanodesBuilder_.build(); } onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor 
field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance()) return this; if (datanodesBuilder_ == null) { if (!other.datanodes_.isEmpty()) { if (datanodes_.isEmpty()) { datanodes_ = other.datanodes_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureDatanodesIsMutable(); datanodes_.addAll(other.datanodes_); } onChanged(); } } else { if (!other.datanodes_.isEmpty()) { if (datanodesBuilder_.isEmpty()) { datanodesBuilder_.dispose(); datanodesBuilder_ = null; datanodes_ = other.datanodes_; bitField0_ = (bitField0_ & ~0x00000001); datanodesBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getDatanodesFieldBuilder() : null; } else { datanodesBuilder_.addAllMessages(other.datanodes_); } } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { for (int i = 0; i < getDatanodesCount(); i++) { if (!getDatanodes(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.util.List datanodes_ = java.util.Collections.emptyList(); private void ensureDatanodesIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { datanodes_ = new java.util.ArrayList(datanodes_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> datanodesBuilder_; /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public java.util.List getDatanodesList() { if (datanodesBuilder_ == null) { return java.util.Collections.unmodifiableList(datanodes_); } else { return datanodesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public int getDatanodesCount() { if (datanodesBuilder_ == null) { 
return datanodes_.size(); } else { return datanodesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) { if (datanodesBuilder_ == null) { return datanodes_.get(index); } else { return datanodesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder setDatanodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (datanodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDatanodesIsMutable(); datanodes_.set(index, value); onChanged(); } else { datanodesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder setDatanodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); datanodes_.set(index, builderForValue.build()); onChanged(); } else { datanodesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (datanodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDatanodesIsMutable(); datanodes_.add(value); onChanged(); } else { datanodesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addDatanodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (datanodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDatanodesIsMutable(); datanodes_.add(index, value); onChanged(); } else { datanodesBuilder_.addMessage(index, value); } return this; } /** * repeated 
.hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addDatanodes( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); datanodes_.add(builderForValue.build()); onChanged(); } else { datanodesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addDatanodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); datanodes_.add(index, builderForValue.build()); onChanged(); } else { datanodesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addAllDatanodes( java.lang.Iterable values) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, datanodes_); onChanged(); } else { datanodesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder clearDatanodes() { if (datanodesBuilder_ == null) { datanodes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { datanodesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder removeDatanodes(int index) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); datanodes_.remove(index); onChanged(); } else { datanodesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getDatanodesBuilder( int index) { return getDatanodesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder( int index) { if (datanodesBuilder_ == null) { return datanodes_.get(index); } else { return datanodesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public java.util.List getDatanodesOrBuilderList() { if (datanodesBuilder_ != null) { return datanodesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(datanodes_); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder() { return getDatanodesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder( int index) { return getDatanodesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public java.util.List getDatanodesBuilderList() { return getDatanodesFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getDatanodesFieldBuilder() { if (datanodesBuilder_ == null) { datanodesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( datanodes_, 
((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); datanodes_ = null; } return datanodesBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeInfosProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeInfosProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DatanodeInfosProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new DatanodeInfosProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DatanodeInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeInfoProto) 
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ boolean hasId(); /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId(); /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder(); /** * optional uint64 capacity = 2 [default = 0]; */ boolean hasCapacity(); /** * optional uint64 capacity = 2 [default = 0]; */ long getCapacity(); /** * optional uint64 dfsUsed = 3 [default = 0]; */ boolean hasDfsUsed(); /** * optional uint64 dfsUsed = 3 [default = 0]; */ long getDfsUsed(); /** * optional uint64 remaining = 4 [default = 0]; */ boolean hasRemaining(); /** * optional uint64 remaining = 4 [default = 0]; */ long getRemaining(); /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ boolean hasBlockPoolUsed(); /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ long getBlockPoolUsed(); /** * optional uint64 lastUpdate = 6 [default = 0]; */ boolean hasLastUpdate(); /** * optional uint64 lastUpdate = 6 [default = 0]; */ long getLastUpdate(); /** * optional uint32 xceiverCount = 7 [default = 0]; */ boolean hasXceiverCount(); /** * optional uint32 xceiverCount = 7 [default = 0]; */ int getXceiverCount(); /** * optional string location = 8; */ boolean hasLocation(); /** * optional string location = 8; */ java.lang.String getLocation(); /** * optional string location = 8; */ org.apache.hadoop.thirdparty.protobuf.ByteString getLocationBytes(); /** * optional uint64 nonDfsUsed = 9; */ boolean hasNonDfsUsed(); /** * optional uint64 nonDfsUsed = 9; */ long getNonDfsUsed(); /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ boolean hasAdminState(); /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState(); /** * optional uint64 cacheCapacity = 11 [default = 0]; */ boolean hasCacheCapacity(); /** * optional uint64 cacheCapacity = 11 [default = 0]; */ long getCacheCapacity(); /** * optional uint64 cacheUsed = 12 [default = 0]; */ boolean hasCacheUsed(); /** * optional uint64 cacheUsed = 12 [default = 0]; */ long getCacheUsed(); /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ boolean hasLastUpdateMonotonic(); /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ long getLastUpdateMonotonic(); /** * optional string upgradeDomain = 14; */ boolean hasUpgradeDomain(); /** * optional string upgradeDomain = 14; */ java.lang.String getUpgradeDomain(); /** * optional string upgradeDomain = 14; */ org.apache.hadoop.thirdparty.protobuf.ByteString getUpgradeDomainBytes(); /** * optional uint64 lastBlockReportTime = 15 [default = 0]; */ boolean hasLastBlockReportTime(); /** * optional uint64 lastBlockReportTime = 15 [default = 0]; */ long getLastBlockReportTime(); /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; */ boolean hasLastBlockReportMonotonic(); /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; */ long getLastBlockReportMonotonic(); /** * optional uint32 numBlocks = 17 [default = 0]; */ boolean hasNumBlocks(); /** * optional uint32 numBlocks = 17 [default = 0]; */ int getNumBlocks(); } /** *
   **
   * The status of a Datanode.
   * 
* * Protobuf type {@code hadoop.hdfs.DatanodeInfoProto} */ public static final class DatanodeInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeInfoProto) DatanodeInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DatanodeInfoProto.newBuilder() to construct. private DatanodeInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DatanodeInfoProto() { location_ = ""; adminState_ = 0; upgradeDomain_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DatanodeInfoProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) != 0)) { subBuilder = id_.toBuilder(); } id_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(id_); id_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; capacity_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; dfsUsed_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; remaining_ = input.readUInt64(); break; } 
case 40: { bitField0_ |= 0x00000010; blockPoolUsed_ = input.readUInt64(); break; } case 48: { bitField0_ |= 0x00000020; lastUpdate_ = input.readUInt64(); break; } case 56: { bitField0_ |= 0x00000040; xceiverCount_ = input.readUInt32(); break; } case 66: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000080; location_ = bs; break; } case 72: { bitField0_ |= 0x00000100; nonDfsUsed_ = input.readUInt64(); break; } case 80: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(10, rawValue); } else { bitField0_ |= 0x00000200; adminState_ = rawValue; } break; } case 88: { bitField0_ |= 0x00000400; cacheCapacity_ = input.readUInt64(); break; } case 96: { bitField0_ |= 0x00000800; cacheUsed_ = input.readUInt64(); break; } case 104: { bitField0_ |= 0x00001000; lastUpdateMonotonic_ = input.readUInt64(); break; } case 114: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00002000; upgradeDomain_ = bs; break; } case 120: { bitField0_ |= 0x00004000; lastBlockReportTime_ = input.readUInt64(); break; } case 128: { bitField0_ |= 0x00008000; lastBlockReportMonotonic_ = input.readUInt64(); break; } case 136: { bitField0_ |= 0x00010000; numBlocks_ = input.readUInt32(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final 
org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class); } /** * Protobuf enum {@code hadoop.hdfs.DatanodeInfoProto.AdminState} */ public enum AdminState implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * NORMAL = 0; */ NORMAL(0), /** * DECOMMISSION_INPROGRESS = 1; */ DECOMMISSION_INPROGRESS(1), /** * DECOMMISSIONED = 2; */ DECOMMISSIONED(2), /** * ENTERING_MAINTENANCE = 3; */ ENTERING_MAINTENANCE(3), /** * IN_MAINTENANCE = 4; */ IN_MAINTENANCE(4), ; /** * NORMAL = 0; */ public static final int NORMAL_VALUE = 0; /** * DECOMMISSION_INPROGRESS = 1; */ public static final int DECOMMISSION_INPROGRESS_VALUE = 1; /** * DECOMMISSIONED = 2; */ public static final int DECOMMISSIONED_VALUE = 2; /** * ENTERING_MAINTENANCE = 3; */ public static final int ENTERING_MAINTENANCE_VALUE = 3; /** * IN_MAINTENANCE = 4; */ public static final int IN_MAINTENANCE_VALUE = 4; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. 
*/ @java.lang.Deprecated public static AdminState valueOf(int value) { return forNumber(value); } public static AdminState forNumber(int value) { switch (value) { case 0: return NORMAL; case 1: return DECOMMISSION_INPROGRESS; case 2: return DECOMMISSIONED; case 3: return ENTERING_MAINTENANCE; case 4: return IN_MAINTENANCE; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< AdminState> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public AdminState findValueByNumber(int number) { return AdminState.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDescriptor().getEnumTypes().get(0); } private static final AdminState[] VALUES = values(); public static AdminState valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private AdminState(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.DatanodeInfoProto.AdminState) } private int bitField0_; public static final int ID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_; /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public boolean hasId() { return ((bitField0_ & 
0x00000001) != 0); } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() { return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : id_; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() { return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : id_; } public static final int CAPACITY_FIELD_NUMBER = 2; private long capacity_; /** * optional uint64 capacity = 2 [default = 0]; */ public boolean hasCapacity() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 capacity = 2 [default = 0]; */ public long getCapacity() { return capacity_; } public static final int DFSUSED_FIELD_NUMBER = 3; private long dfsUsed_; /** * optional uint64 dfsUsed = 3 [default = 0]; */ public boolean hasDfsUsed() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 dfsUsed = 3 [default = 0]; */ public long getDfsUsed() { return dfsUsed_; } public static final int REMAINING_FIELD_NUMBER = 4; private long remaining_; /** * optional uint64 remaining = 4 [default = 0]; */ public boolean hasRemaining() { return ((bitField0_ & 0x00000008) != 0); } /** * optional uint64 remaining = 4 [default = 0]; */ public long getRemaining() { return remaining_; } public static final int BLOCKPOOLUSED_FIELD_NUMBER = 5; private long blockPoolUsed_; /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ public boolean hasBlockPoolUsed() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ public long getBlockPoolUsed() { return blockPoolUsed_; } public static final int LASTUPDATE_FIELD_NUMBER = 6; private long lastUpdate_; /** * optional uint64 lastUpdate = 6 [default = 0]; */ public boolean hasLastUpdate() { return ((bitField0_ & 0x00000020) != 0); 
} /** * optional uint64 lastUpdate = 6 [default = 0]; */ public long getLastUpdate() { return lastUpdate_; } public static final int XCEIVERCOUNT_FIELD_NUMBER = 7; private int xceiverCount_; /** * optional uint32 xceiverCount = 7 [default = 0]; */ public boolean hasXceiverCount() { return ((bitField0_ & 0x00000040) != 0); } /** * optional uint32 xceiverCount = 7 [default = 0]; */ public int getXceiverCount() { return xceiverCount_; } public static final int LOCATION_FIELD_NUMBER = 8; private volatile java.lang.Object location_; /** * optional string location = 8; */ public boolean hasLocation() { return ((bitField0_ & 0x00000080) != 0); } /** * optional string location = 8; */ public java.lang.String getLocation() { java.lang.Object ref = location_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { location_ = s; } return s; } } /** * optional string location = 8; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getLocationBytes() { java.lang.Object ref = location_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); location_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int NONDFSUSED_FIELD_NUMBER = 9; private long nonDfsUsed_; /** * optional uint64 nonDfsUsed = 9; */ public boolean hasNonDfsUsed() { return ((bitField0_ & 0x00000100) != 0); } /** * optional uint64 nonDfsUsed = 9; */ public long getNonDfsUsed() { return nonDfsUsed_; } public static final int ADMINSTATE_FIELD_NUMBER = 10; private int adminState_; /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ public boolean hasAdminState() { return ((bitField0_ & 
0x00000200) != 0); } /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.valueOf(adminState_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL : result; } public static final int CACHECAPACITY_FIELD_NUMBER = 11; private long cacheCapacity_; /** * optional uint64 cacheCapacity = 11 [default = 0]; */ public boolean hasCacheCapacity() { return ((bitField0_ & 0x00000400) != 0); } /** * optional uint64 cacheCapacity = 11 [default = 0]; */ public long getCacheCapacity() { return cacheCapacity_; } public static final int CACHEUSED_FIELD_NUMBER = 12; private long cacheUsed_; /** * optional uint64 cacheUsed = 12 [default = 0]; */ public boolean hasCacheUsed() { return ((bitField0_ & 0x00000800) != 0); } /** * optional uint64 cacheUsed = 12 [default = 0]; */ public long getCacheUsed() { return cacheUsed_; } public static final int LASTUPDATEMONOTONIC_FIELD_NUMBER = 13; private long lastUpdateMonotonic_; /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ public boolean hasLastUpdateMonotonic() { return ((bitField0_ & 0x00001000) != 0); } /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ public long getLastUpdateMonotonic() { return lastUpdateMonotonic_; } public static final int UPGRADEDOMAIN_FIELD_NUMBER = 14; private volatile java.lang.Object upgradeDomain_; /** * optional string upgradeDomain = 14; */ public boolean hasUpgradeDomain() { return ((bitField0_ & 0x00002000) != 0); } /** * optional string upgradeDomain = 14; */ public java.lang.String getUpgradeDomain() { java.lang.Object ref = upgradeDomain_; if (ref instanceof java.lang.String) { 
return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { upgradeDomain_ = s; } return s; } } /** * optional string upgradeDomain = 14; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getUpgradeDomainBytes() { java.lang.Object ref = upgradeDomain_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); upgradeDomain_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int LASTBLOCKREPORTTIME_FIELD_NUMBER = 15; private long lastBlockReportTime_; /** * optional uint64 lastBlockReportTime = 15 [default = 0]; */ public boolean hasLastBlockReportTime() { return ((bitField0_ & 0x00004000) != 0); } /** * optional uint64 lastBlockReportTime = 15 [default = 0]; */ public long getLastBlockReportTime() { return lastBlockReportTime_; } public static final int LASTBLOCKREPORTMONOTONIC_FIELD_NUMBER = 16; private long lastBlockReportMonotonic_; /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; */ public boolean hasLastBlockReportMonotonic() { return ((bitField0_ & 0x00008000) != 0); } /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; */ public long getLastBlockReportMonotonic() { return lastBlockReportMonotonic_; } public static final int NUMBLOCKS_FIELD_NUMBER = 17; private int numBlocks_; /** * optional uint32 numBlocks = 17 [default = 0]; */ public boolean hasNumBlocks() { return ((bitField0_ & 0x00010000) != 0); } /** * optional uint32 numBlocks = 17 [default = 0]; */ public int getNumBlocks() { return numBlocks_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if 
(isInitialized == 0) return false; if (!hasId()) { memoizedIsInitialized = 0; return false; } if (!getId().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getId()); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, capacity_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, dfsUsed_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, remaining_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, blockPoolUsed_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt64(6, lastUpdate_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeUInt32(7, xceiverCount_); } if (((bitField0_ & 0x00000080) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 8, location_); } if (((bitField0_ & 0x00000100) != 0)) { output.writeUInt64(9, nonDfsUsed_); } if (((bitField0_ & 0x00000200) != 0)) { output.writeEnum(10, adminState_); } if (((bitField0_ & 0x00000400) != 0)) { output.writeUInt64(11, cacheCapacity_); } if (((bitField0_ & 0x00000800) != 0)) { output.writeUInt64(12, cacheUsed_); } if (((bitField0_ & 0x00001000) != 0)) { output.writeUInt64(13, lastUpdateMonotonic_); } if (((bitField0_ & 0x00002000) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 14, upgradeDomain_); } if (((bitField0_ & 0x00004000) != 0)) { output.writeUInt64(15, lastBlockReportTime_); } if (((bitField0_ & 0x00008000) != 0)) { output.writeUInt64(16, lastBlockReportMonotonic_); } if (((bitField0_ & 0x00010000) != 0)) { output.writeUInt32(17, numBlocks_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 
0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getId()); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, capacity_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, dfsUsed_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, remaining_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, blockPoolUsed_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(6, lastUpdate_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(7, xceiverCount_); } if (((bitField0_ & 0x00000080) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(8, location_); } if (((bitField0_ & 0x00000100) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(9, nonDfsUsed_); } if (((bitField0_ & 0x00000200) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(10, adminState_); } if (((bitField0_ & 0x00000400) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(11, cacheCapacity_); } if (((bitField0_ & 0x00000800) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(12, cacheUsed_); } if (((bitField0_ & 0x00001000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(13, lastUpdateMonotonic_); } if (((bitField0_ & 0x00002000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(14, upgradeDomain_); } if (((bitField0_ & 
0x00004000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(15, lastBlockReportTime_); } if (((bitField0_ & 0x00008000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(16, lastBlockReportMonotonic_); } if (((bitField0_ & 0x00010000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(17, numBlocks_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) obj; if (hasId() != other.hasId()) return false; if (hasId()) { if (!getId() .equals(other.getId())) return false; } if (hasCapacity() != other.hasCapacity()) return false; if (hasCapacity()) { if (getCapacity() != other.getCapacity()) return false; } if (hasDfsUsed() != other.hasDfsUsed()) return false; if (hasDfsUsed()) { if (getDfsUsed() != other.getDfsUsed()) return false; } if (hasRemaining() != other.hasRemaining()) return false; if (hasRemaining()) { if (getRemaining() != other.getRemaining()) return false; } if (hasBlockPoolUsed() != other.hasBlockPoolUsed()) return false; if (hasBlockPoolUsed()) { if (getBlockPoolUsed() != other.getBlockPoolUsed()) return false; } if (hasLastUpdate() != other.hasLastUpdate()) return false; if (hasLastUpdate()) { if (getLastUpdate() != other.getLastUpdate()) return false; } if (hasXceiverCount() != other.hasXceiverCount()) return false; if (hasXceiverCount()) { if (getXceiverCount() != other.getXceiverCount()) return false; } if (hasLocation() != other.hasLocation()) return false; if (hasLocation()) { if (!getLocation() .equals(other.getLocation())) return 
false; } if (hasNonDfsUsed() != other.hasNonDfsUsed()) return false; if (hasNonDfsUsed()) { if (getNonDfsUsed() != other.getNonDfsUsed()) return false; } if (hasAdminState() != other.hasAdminState()) return false; if (hasAdminState()) { if (adminState_ != other.adminState_) return false; } if (hasCacheCapacity() != other.hasCacheCapacity()) return false; if (hasCacheCapacity()) { if (getCacheCapacity() != other.getCacheCapacity()) return false; } if (hasCacheUsed() != other.hasCacheUsed()) return false; if (hasCacheUsed()) { if (getCacheUsed() != other.getCacheUsed()) return false; } if (hasLastUpdateMonotonic() != other.hasLastUpdateMonotonic()) return false; if (hasLastUpdateMonotonic()) { if (getLastUpdateMonotonic() != other.getLastUpdateMonotonic()) return false; } if (hasUpgradeDomain() != other.hasUpgradeDomain()) return false; if (hasUpgradeDomain()) { if (!getUpgradeDomain() .equals(other.getUpgradeDomain())) return false; } if (hasLastBlockReportTime() != other.hasLastBlockReportTime()) return false; if (hasLastBlockReportTime()) { if (getLastBlockReportTime() != other.getLastBlockReportTime()) return false; } if (hasLastBlockReportMonotonic() != other.hasLastBlockReportMonotonic()) return false; if (hasLastBlockReportMonotonic()) { if (getLastBlockReportMonotonic() != other.getLastBlockReportMonotonic()) return false; } if (hasNumBlocks() != other.hasNumBlocks()) return false; if (hasNumBlocks()) { if (getNumBlocks() != other.getNumBlocks()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasId()) { hash = (37 * hash) + ID_FIELD_NUMBER; hash = (53 * hash) + getId().hashCode(); } if (hasCapacity()) { hash = (37 * hash) + CAPACITY_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getCapacity()); } if 
(hasDfsUsed()) { hash = (37 * hash) + DFSUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getDfsUsed()); } if (hasRemaining()) { hash = (37 * hash) + REMAINING_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getRemaining()); } if (hasBlockPoolUsed()) { hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getBlockPoolUsed()); } if (hasLastUpdate()) { hash = (37 * hash) + LASTUPDATE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLastUpdate()); } if (hasXceiverCount()) { hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER; hash = (53 * hash) + getXceiverCount(); } if (hasLocation()) { hash = (37 * hash) + LOCATION_FIELD_NUMBER; hash = (53 * hash) + getLocation().hashCode(); } if (hasNonDfsUsed()) { hash = (37 * hash) + NONDFSUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNonDfsUsed()); } if (hasAdminState()) { hash = (37 * hash) + ADMINSTATE_FIELD_NUMBER; hash = (53 * hash) + adminState_; } if (hasCacheCapacity()) { hash = (37 * hash) + CACHECAPACITY_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getCacheCapacity()); } if (hasCacheUsed()) { hash = (37 * hash) + CACHEUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getCacheUsed()); } if (hasLastUpdateMonotonic()) { hash = (37 * hash) + LASTUPDATEMONOTONIC_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLastUpdateMonotonic()); } if (hasUpgradeDomain()) { hash = (37 * hash) + UPGRADEDOMAIN_FIELD_NUMBER; hash = (53 * hash) + getUpgradeDomain().hashCode(); } if (hasLastBlockReportTime()) { hash = (37 * hash) + LASTBLOCKREPORTTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( 
getLastBlockReportTime()); } if (hasLastBlockReportMonotonic()) { hash = (37 * hash) + LASTBLOCKREPORTMONOTONIC_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLastBlockReportMonotonic()); } if (hasNumBlocks()) { hash = (37 * hash) + NUMBLOCKS_FIELD_NUMBER; hash = (53 * hash) + getNumBlocks(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) 
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
.parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * The status of a Datanode
     * 
* * Protobuf type {@code hadoop.hdfs.DatanodeInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getIdFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (idBuilder_ == null) { id_ = null; } else { idBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); capacity_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); dfsUsed_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); remaining_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); blockPoolUsed_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); lastUpdate_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); xceiverCount_ = 0; bitField0_ = (bitField0_ & ~0x00000040); 
location_ = ""; bitField0_ = (bitField0_ & ~0x00000080); nonDfsUsed_ = 0L; bitField0_ = (bitField0_ & ~0x00000100); adminState_ = 0; bitField0_ = (bitField0_ & ~0x00000200); cacheCapacity_ = 0L; bitField0_ = (bitField0_ & ~0x00000400); cacheUsed_ = 0L; bitField0_ = (bitField0_ & ~0x00000800); lastUpdateMonotonic_ = 0L; bitField0_ = (bitField0_ & ~0x00001000); upgradeDomain_ = ""; bitField0_ = (bitField0_ & ~0x00002000); lastBlockReportTime_ = 0L; bitField0_ = (bitField0_ & ~0x00004000); lastBlockReportMonotonic_ = 0L; bitField0_ = (bitField0_ & ~0x00008000); numBlocks_ = 0; bitField0_ = (bitField0_ & ~0x00010000); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { if (idBuilder_ == null) { result.id_ = id_; } else { result.id_ = idBuilder_.build(); } to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.capacity_ = capacity_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 
0x00000004) != 0)) { result.dfsUsed_ = dfsUsed_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.remaining_ = remaining_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.blockPoolUsed_ = blockPoolUsed_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.lastUpdate_ = lastUpdate_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { result.xceiverCount_ = xceiverCount_; to_bitField0_ |= 0x00000040; } if (((from_bitField0_ & 0x00000080) != 0)) { to_bitField0_ |= 0x00000080; } result.location_ = location_; if (((from_bitField0_ & 0x00000100) != 0)) { result.nonDfsUsed_ = nonDfsUsed_; to_bitField0_ |= 0x00000100; } if (((from_bitField0_ & 0x00000200) != 0)) { to_bitField0_ |= 0x00000200; } result.adminState_ = adminState_; if (((from_bitField0_ & 0x00000400) != 0)) { result.cacheCapacity_ = cacheCapacity_; to_bitField0_ |= 0x00000400; } if (((from_bitField0_ & 0x00000800) != 0)) { result.cacheUsed_ = cacheUsed_; to_bitField0_ |= 0x00000800; } if (((from_bitField0_ & 0x00001000) != 0)) { result.lastUpdateMonotonic_ = lastUpdateMonotonic_; to_bitField0_ |= 0x00001000; } if (((from_bitField0_ & 0x00002000) != 0)) { to_bitField0_ |= 0x00002000; } result.upgradeDomain_ = upgradeDomain_; if (((from_bitField0_ & 0x00004000) != 0)) { result.lastBlockReportTime_ = lastBlockReportTime_; to_bitField0_ |= 0x00004000; } if (((from_bitField0_ & 0x00008000) != 0)) { result.lastBlockReportMonotonic_ = lastBlockReportMonotonic_; to_bitField0_ |= 0x00008000; } if (((from_bitField0_ & 0x00010000) != 0)) { result.numBlocks_ = numBlocks_; to_bitField0_ |= 0x00010000; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return 
super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) return this; if (other.hasId()) { mergeId(other.getId()); } if (other.hasCapacity()) { setCapacity(other.getCapacity()); } if (other.hasDfsUsed()) { setDfsUsed(other.getDfsUsed()); } if (other.hasRemaining()) { setRemaining(other.getRemaining()); } if (other.hasBlockPoolUsed()) { setBlockPoolUsed(other.getBlockPoolUsed()); } if (other.hasLastUpdate()) { setLastUpdate(other.getLastUpdate()); } if (other.hasXceiverCount()) { setXceiverCount(other.getXceiverCount()); } if (other.hasLocation()) { bitField0_ |= 0x00000080; location_ = other.location_; onChanged(); } if (other.hasNonDfsUsed()) { setNonDfsUsed(other.getNonDfsUsed()); } if (other.hasAdminState()) { 
setAdminState(other.getAdminState()); } if (other.hasCacheCapacity()) { setCacheCapacity(other.getCacheCapacity()); } if (other.hasCacheUsed()) { setCacheUsed(other.getCacheUsed()); } if (other.hasLastUpdateMonotonic()) { setLastUpdateMonotonic(other.getLastUpdateMonotonic()); } if (other.hasUpgradeDomain()) { bitField0_ |= 0x00002000; upgradeDomain_ = other.upgradeDomain_; onChanged(); } if (other.hasLastBlockReportTime()) { setLastBlockReportTime(other.getLastBlockReportTime()); } if (other.hasLastBlockReportMonotonic()) { setLastBlockReportMonotonic(other.getLastBlockReportMonotonic()); } if (other.hasNumBlocks()) { setNumBlocks(other.getNumBlocks()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasId()) { return false; } if (!getId().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> idBuilder_; /** * required 
.hadoop.hdfs.DatanodeIDProto id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() { if (idBuilder_ == null) { return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : id_; } else { return idBuilder_.getMessage(); } } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public Builder setId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { if (idBuilder_ == null) { if (value == null) { throw new NullPointerException(); } id_ = value; onChanged(); } else { idBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public Builder setId( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { if (idBuilder_ == null) { id_ = builderForValue.build(); onChanged(); } else { idBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public Builder mergeId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { if (idBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && id_ != null && id_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) { id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(id_).mergeFrom(value).buildPartial(); } else { id_ = value; } onChanged(); } else { idBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public Builder clearId() { if (idBuilder_ == null) { id_ = null; onChanged(); } else { idBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getIdBuilder() { bitField0_ |= 0x00000001; onChanged(); return getIdFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() { if (idBuilder_ != null) { return idBuilder_.getMessageOrBuilder(); } else { return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : id_; } } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> getIdFieldBuilder() { if (idBuilder_ == null) { idBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>( getId(), getParentForChildren(), isClean()); id_ = null; } return idBuilder_; } private long capacity_ ; /** * optional uint64 capacity = 2 [default = 0]; */ public boolean hasCapacity() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 capacity = 2 [default = 0]; */ public long getCapacity() { return capacity_; } /** * optional uint64 capacity = 2 [default = 0]; */ public Builder setCapacity(long value) { bitField0_ |= 0x00000002; capacity_ = value; onChanged(); return this; } /** * optional uint64 capacity = 2 [default = 0]; */ public Builder clearCapacity() { bitField0_ = (bitField0_ & ~0x00000002); capacity_ = 0L; onChanged(); return this; } private long dfsUsed_ ; /** * optional uint64 dfsUsed = 3 [default = 0]; */ public boolean hasDfsUsed() { return ((bitField0_ & 
0x00000004) != 0); } /** * optional uint64 dfsUsed = 3 [default = 0]; */ public long getDfsUsed() { return dfsUsed_; } /** * optional uint64 dfsUsed = 3 [default = 0]; */ public Builder setDfsUsed(long value) { bitField0_ |= 0x00000004; dfsUsed_ = value; onChanged(); return this; } /** * optional uint64 dfsUsed = 3 [default = 0]; */ public Builder clearDfsUsed() { bitField0_ = (bitField0_ & ~0x00000004); dfsUsed_ = 0L; onChanged(); return this; } private long remaining_ ; /** * optional uint64 remaining = 4 [default = 0]; */ public boolean hasRemaining() { return ((bitField0_ & 0x00000008) != 0); } /** * optional uint64 remaining = 4 [default = 0]; */ public long getRemaining() { return remaining_; } /** * optional uint64 remaining = 4 [default = 0]; */ public Builder setRemaining(long value) { bitField0_ |= 0x00000008; remaining_ = value; onChanged(); return this; } /** * optional uint64 remaining = 4 [default = 0]; */ public Builder clearRemaining() { bitField0_ = (bitField0_ & ~0x00000008); remaining_ = 0L; onChanged(); return this; } private long blockPoolUsed_ ; /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ public boolean hasBlockPoolUsed() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ public long getBlockPoolUsed() { return blockPoolUsed_; } /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ public Builder setBlockPoolUsed(long value) { bitField0_ |= 0x00000010; blockPoolUsed_ = value; onChanged(); return this; } /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ public Builder clearBlockPoolUsed() { bitField0_ = (bitField0_ & ~0x00000010); blockPoolUsed_ = 0L; onChanged(); return this; } private long lastUpdate_ ; /** * optional uint64 lastUpdate = 6 [default = 0]; */ public boolean hasLastUpdate() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 lastUpdate = 6 [default = 0]; */ public long getLastUpdate() { return lastUpdate_; } /** * optional uint64 
lastUpdate = 6 [default = 0]; */ public Builder setLastUpdate(long value) { bitField0_ |= 0x00000020; lastUpdate_ = value; onChanged(); return this; } /** * optional uint64 lastUpdate = 6 [default = 0]; */ public Builder clearLastUpdate() { bitField0_ = (bitField0_ & ~0x00000020); lastUpdate_ = 0L; onChanged(); return this; } private int xceiverCount_ ; /** * optional uint32 xceiverCount = 7 [default = 0]; */ public boolean hasXceiverCount() { return ((bitField0_ & 0x00000040) != 0); } /** * optional uint32 xceiverCount = 7 [default = 0]; */ public int getXceiverCount() { return xceiverCount_; } /** * optional uint32 xceiverCount = 7 [default = 0]; */ public Builder setXceiverCount(int value) { bitField0_ |= 0x00000040; xceiverCount_ = value; onChanged(); return this; } /** * optional uint32 xceiverCount = 7 [default = 0]; */ public Builder clearXceiverCount() { bitField0_ = (bitField0_ & ~0x00000040); xceiverCount_ = 0; onChanged(); return this; } private java.lang.Object location_ = ""; /** * optional string location = 8; */ public boolean hasLocation() { return ((bitField0_ & 0x00000080) != 0); } /** * optional string location = 8; */ public java.lang.String getLocation() { java.lang.Object ref = location_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { location_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string location = 8; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getLocationBytes() { java.lang.Object ref = location_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); location_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string location = 8; */ public Builder 
setLocation( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000080; location_ = value; onChanged(); return this; } /** * optional string location = 8; */ public Builder clearLocation() { bitField0_ = (bitField0_ & ~0x00000080); location_ = getDefaultInstance().getLocation(); onChanged(); return this; } /** * optional string location = 8; */ public Builder setLocationBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000080; location_ = value; onChanged(); return this; } private long nonDfsUsed_ ; /** * optional uint64 nonDfsUsed = 9; */ public boolean hasNonDfsUsed() { return ((bitField0_ & 0x00000100) != 0); } /** * optional uint64 nonDfsUsed = 9; */ public long getNonDfsUsed() { return nonDfsUsed_; } /** * optional uint64 nonDfsUsed = 9; */ public Builder setNonDfsUsed(long value) { bitField0_ |= 0x00000100; nonDfsUsed_ = value; onChanged(); return this; } /** * optional uint64 nonDfsUsed = 9; */ public Builder clearNonDfsUsed() { bitField0_ = (bitField0_ & ~0x00000100); nonDfsUsed_ = 0L; onChanged(); return this; } private int adminState_ = 0; /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ public boolean hasAdminState() { return ((bitField0_ & 0x00000200) != 0); } /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.valueOf(adminState_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL : result; } /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ public Builder setAdminState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000200; adminState_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ public Builder clearAdminState() { bitField0_ = (bitField0_ & ~0x00000200); adminState_ = 0; onChanged(); return this; } private long cacheCapacity_ ; /** * optional uint64 cacheCapacity = 11 [default = 0]; */ public boolean hasCacheCapacity() { return ((bitField0_ & 0x00000400) != 0); } /** * optional uint64 cacheCapacity = 11 [default = 0]; */ public long getCacheCapacity() { return cacheCapacity_; } /** * optional uint64 cacheCapacity = 11 [default = 0]; */ public Builder setCacheCapacity(long value) { bitField0_ |= 0x00000400; cacheCapacity_ = value; onChanged(); return this; } /** * optional uint64 cacheCapacity = 11 [default = 0]; */ public Builder clearCacheCapacity() { bitField0_ = (bitField0_ & ~0x00000400); cacheCapacity_ = 0L; onChanged(); return this; } private long cacheUsed_ ; /** * optional uint64 cacheUsed = 12 [default = 0]; */ public boolean hasCacheUsed() { return ((bitField0_ & 0x00000800) != 0); } /** * optional uint64 cacheUsed = 12 [default = 0]; */ public long getCacheUsed() { return cacheUsed_; } /** * optional uint64 cacheUsed = 12 [default = 0]; */ public Builder setCacheUsed(long value) { bitField0_ |= 0x00000800; cacheUsed_ = value; onChanged(); return this; } /** * optional uint64 cacheUsed = 12 [default = 0]; */ public Builder clearCacheUsed() { bitField0_ = (bitField0_ & ~0x00000800); cacheUsed_ = 0L; onChanged(); return this; } private long lastUpdateMonotonic_ ; /** * optional uint64 
lastUpdateMonotonic = 13 [default = 0]; */ public boolean hasLastUpdateMonotonic() { return ((bitField0_ & 0x00001000) != 0); } /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ public long getLastUpdateMonotonic() { return lastUpdateMonotonic_; } /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ public Builder setLastUpdateMonotonic(long value) { bitField0_ |= 0x00001000; lastUpdateMonotonic_ = value; onChanged(); return this; } /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ public Builder clearLastUpdateMonotonic() { bitField0_ = (bitField0_ & ~0x00001000); lastUpdateMonotonic_ = 0L; onChanged(); return this; } private java.lang.Object upgradeDomain_ = ""; /** * optional string upgradeDomain = 14; */ public boolean hasUpgradeDomain() { return ((bitField0_ & 0x00002000) != 0); } /** * optional string upgradeDomain = 14; */ public java.lang.String getUpgradeDomain() { java.lang.Object ref = upgradeDomain_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { upgradeDomain_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string upgradeDomain = 14; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getUpgradeDomainBytes() { java.lang.Object ref = upgradeDomain_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); upgradeDomain_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string upgradeDomain = 14; */ public Builder setUpgradeDomain( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00002000; upgradeDomain_ = value; onChanged(); return this; } /** * optional string upgradeDomain = 14; */ public 
Builder clearUpgradeDomain() { bitField0_ = (bitField0_ & ~0x00002000); upgradeDomain_ = getDefaultInstance().getUpgradeDomain(); onChanged(); return this; } /** * optional string upgradeDomain = 14; */ public Builder setUpgradeDomainBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00002000; upgradeDomain_ = value; onChanged(); return this; } private long lastBlockReportTime_ ; /** * optional uint64 lastBlockReportTime = 15 [default = 0]; */ public boolean hasLastBlockReportTime() { return ((bitField0_ & 0x00004000) != 0); } /** * optional uint64 lastBlockReportTime = 15 [default = 0]; */ public long getLastBlockReportTime() { return lastBlockReportTime_; } /** * optional uint64 lastBlockReportTime = 15 [default = 0]; */ public Builder setLastBlockReportTime(long value) { bitField0_ |= 0x00004000; lastBlockReportTime_ = value; onChanged(); return this; } /** * optional uint64 lastBlockReportTime = 15 [default = 0]; */ public Builder clearLastBlockReportTime() { bitField0_ = (bitField0_ & ~0x00004000); lastBlockReportTime_ = 0L; onChanged(); return this; } private long lastBlockReportMonotonic_ ; /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; */ public boolean hasLastBlockReportMonotonic() { return ((bitField0_ & 0x00008000) != 0); } /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; */ public long getLastBlockReportMonotonic() { return lastBlockReportMonotonic_; } /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; */ public Builder setLastBlockReportMonotonic(long value) { bitField0_ |= 0x00008000; lastBlockReportMonotonic_ = value; onChanged(); return this; } /** * optional uint64 lastBlockReportMonotonic = 16 [default = 0]; */ public Builder clearLastBlockReportMonotonic() { bitField0_ = (bitField0_ & ~0x00008000); lastBlockReportMonotonic_ = 0L; onChanged(); return this; } private int numBlocks_ ; /** * optional uint32 
numBlocks = 17 [default = 0]; */ public boolean hasNumBlocks() { return ((bitField0_ & 0x00010000) != 0); } /** * optional uint32 numBlocks = 17 [default = 0]; */ public int getNumBlocks() { return numBlocks_; } /** * optional uint32 numBlocks = 17 [default = 0]; */ public Builder setNumBlocks(int value) { bitField0_ |= 0x00010000; numBlocks_ = value; onChanged(); return this; } /** * optional uint32 numBlocks = 17 [default = 0]; */ public Builder clearNumBlocks() { bitField0_ = (bitField0_ & ~0x00010000); numBlocks_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DatanodeInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new DatanodeInfoProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; 
} @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DatanodeStorageProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeStorageProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string storageUuid = 1; */ boolean hasStorageUuid(); /** * required string storageUuid = 1; */ java.lang.String getStorageUuid(); /** * required string storageUuid = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidBytes(); /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ boolean hasState(); /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState(); /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ boolean hasStorageType(); /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType(); } /** *
   **
   * Represents a storage available on the datanode
   * 
* * Protobuf type {@code hadoop.hdfs.DatanodeStorageProto} */ public static final class DatanodeStorageProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeStorageProto) DatanodeStorageProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DatanodeStorageProto.newBuilder() to construct. private DatanodeStorageProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DatanodeStorageProto() { storageUuid_ = ""; state_ = 0; storageType_ = 1; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DatanodeStorageProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; storageUuid_ = bs; break; } case 16: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(2, rawValue); } else { bitField0_ |= 0x00000002; state_ = rawValue; } break; } case 24: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(3, rawValue); } else { bitField0_ |= 0x00000004; storageType_ = rawValue; } break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder.class); } /** * Protobuf enum {@code hadoop.hdfs.DatanodeStorageProto.StorageState} */ public enum StorageState implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * NORMAL = 0; */ NORMAL(0), /** * READ_ONLY_SHARED = 1; */ READ_ONLY_SHARED(1), ; /** * NORMAL = 0; */ public static final int NORMAL_VALUE = 0; /** * READ_ONLY_SHARED = 1; */ public static final int READ_ONLY_SHARED_VALUE = 1; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. 
*/ @java.lang.Deprecated public static StorageState valueOf(int value) { return forNumber(value); } public static StorageState forNumber(int value) { switch (value) { case 0: return NORMAL; case 1: return READ_ONLY_SHARED; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< StorageState> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public StorageState findValueByNumber(int number) { return StorageState.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDescriptor().getEnumTypes().get(0); } private static final StorageState[] VALUES = values(); public static StorageState valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private StorageState(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.DatanodeStorageProto.StorageState) } private int bitField0_; public static final int STORAGEUUID_FIELD_NUMBER = 1; private volatile java.lang.Object storageUuid_; /** * required string storageUuid = 1; */ public boolean hasStorageUuid() { return ((bitField0_ & 0x00000001) != 0); } /** * required string storageUuid = 1; */ public java.lang.String getStorageUuid() { 
java.lang.Object ref = storageUuid_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { storageUuid_ = s; } return s; } } /** * required string storageUuid = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidBytes() { java.lang.Object ref = storageUuid_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); storageUuid_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int STATE_FIELD_NUMBER = 2; private int state_; /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ public boolean hasState() { return ((bitField0_ & 0x00000002) != 0); } /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.valueOf(state_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL : result; } public static final int STORAGETYPE_FIELD_NUMBER = 3; private int storageType_; /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ public boolean hasStorageType() { return ((bitField0_ & 0x00000004) != 0); } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(storageType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasStorageUuid()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, storageUuid_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeEnum(2, state_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeEnum(3, storageType_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, storageUuid_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(2, state_); } if 
(((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(3, storageType_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) obj; if (hasStorageUuid() != other.hasStorageUuid()) return false; if (hasStorageUuid()) { if (!getStorageUuid() .equals(other.getStorageUuid())) return false; } if (hasState() != other.hasState()) return false; if (hasState()) { if (state_ != other.state_) return false; } if (hasStorageType() != other.hasStorageType()) return false; if (hasStorageType()) { if (storageType_ != other.storageType_) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasStorageUuid()) { hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER; hash = (53 * hash) + getStorageUuid().hashCode(); } if (hasState()) { hash = (37 * hash) + STATE_FIELD_NUMBER; hash = (53 * hash) + state_; } if (hasStorageType()) { hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER; hash = (53 * hash) + storageType_; } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( 
java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } 
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Represents a storage available on the datanode
     * 
* * Protobuf type {@code hadoop.hdfs.DatanodeStorageProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeStorageProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); storageUuid_ = ""; bitField0_ = (bitField0_ & ~0x00000001); state_ = 0; bitField0_ = (bitField0_ & ~0x00000002); storageType_ = 1; bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor; } @java.lang.Override public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.storageUuid_ = storageUuid_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.state_ = state_; if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.storageType_ = storageType_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } 
@java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) return this; if (other.hasStorageUuid()) { bitField0_ |= 0x00000001; storageUuid_ = other.storageUuid_; onChanged(); } if (other.hasState()) { setState(other.getState()); } if (other.hasStorageType()) { setStorageType(other.getStorageType()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasStorageUuid()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object storageUuid_ = ""; /** * required string storageUuid = 1; */ public boolean 
hasStorageUuid() { return ((bitField0_ & 0x00000001) != 0); } /** * required string storageUuid = 1; */ public java.lang.String getStorageUuid() { java.lang.Object ref = storageUuid_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { storageUuid_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string storageUuid = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidBytes() { java.lang.Object ref = storageUuid_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); storageUuid_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string storageUuid = 1; */ public Builder setStorageUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; storageUuid_ = value; onChanged(); return this; } /** * required string storageUuid = 1; */ public Builder clearStorageUuid() { bitField0_ = (bitField0_ & ~0x00000001); storageUuid_ = getDefaultInstance().getStorageUuid(); onChanged(); return this; } /** * required string storageUuid = 1; */ public Builder setStorageUuidBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; storageUuid_ = value; onChanged(); return this; } private int state_ = 0; /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ public boolean hasState() { return ((bitField0_ & 0x00000002) != 0); } /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState() { 
@SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.valueOf(state_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL : result; } /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; state_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ public Builder clearState() { bitField0_ = (bitField0_ & ~0x00000002); state_ = 0; onChanged(); return this; } private int storageType_ = 1; /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ public boolean hasStorageType() { return ((bitField0_ & 0x00000004) != 0); } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(storageType_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; storageType_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ public Builder clearStorageType() { bitField0_ = (bitField0_ & ~0x00000004); storageType_ = 1; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeStorageProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeStorageProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DatanodeStorageProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new DatanodeStorageProto(input, extensionRegistry); } }; 
public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface StorageReportProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageReportProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated boolean hasStorageUuid(); /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated java.lang.String getStorageUuid(); /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidBytes(); /** * optional bool failed = 2 [default = false]; */ boolean hasFailed(); /** * optional bool failed = 2 [default = false]; */ boolean getFailed(); /** * optional uint64 capacity = 3 [default = 0]; */ boolean hasCapacity(); /** * optional uint64 capacity = 3 [default = 0]; */ long getCapacity(); /** * optional uint64 dfsUsed = 4 [default = 0]; */ boolean hasDfsUsed(); /** * optional uint64 dfsUsed = 4 [default = 0]; */ long getDfsUsed(); /** * optional uint64 remaining = 5 [default = 0]; */ boolean hasRemaining(); /** * optional uint64 remaining = 5 [default = 0]; */ long getRemaining(); /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ boolean hasBlockPoolUsed(); /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ long getBlockPoolUsed(); /** *
     * supersedes StorageUuid
     * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ boolean hasStorage(); /** *
     * supersedes StorageUuid
     * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage(); /** *
     * supersedes StorageUuid
     * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder(); /** * optional uint64 nonDfsUsed = 8; */ boolean hasNonDfsUsed(); /** * optional uint64 nonDfsUsed = 8; */ long getNonDfsUsed(); } /** * Protobuf type {@code hadoop.hdfs.StorageReportProto} */ public static final class StorageReportProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageReportProto) StorageReportProtoOrBuilder { private static final long serialVersionUID = 0L; // Use StorageReportProto.newBuilder() to construct. private StorageReportProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private StorageReportProto() { storageUuid_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StorageReportProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; storageUuid_ = bs; break; } case 16: { bitField0_ |= 0x00000002; failed_ = input.readBool(); break; } case 24: { bitField0_ |= 0x00000004; capacity_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; dfsUsed_ = input.readUInt64(); break; } case 40: { 
bitField0_ |= 0x00000010; remaining_ = input.readUInt64(); break; } case 48: { bitField0_ |= 0x00000020; blockPoolUsed_ = input.readUInt64(); break; } case 58: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder subBuilder = null; if (((bitField0_ & 0x00000040) != 0)) { subBuilder = storage_.toBuilder(); } storage_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(storage_); storage_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000040; break; } case 64: { bitField0_ |= 0x00000080; nonDfsUsed_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder.class); } private int bitField0_; public static final int STORAGEUUID_FIELD_NUMBER = 1; private volatile java.lang.Object storageUuid_; /** * required string storageUuid = 1 [deprecated = true]; */ 
@java.lang.Deprecated public boolean hasStorageUuid() { return ((bitField0_ & 0x00000001) != 0); } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public java.lang.String getStorageUuid() { java.lang.Object ref = storageUuid_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { storageUuid_ = s; } return s; } } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidBytes() { java.lang.Object ref = storageUuid_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); storageUuid_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int FAILED_FIELD_NUMBER = 2; private boolean failed_; /** * optional bool failed = 2 [default = false]; */ public boolean hasFailed() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bool failed = 2 [default = false]; */ public boolean getFailed() { return failed_; } public static final int CAPACITY_FIELD_NUMBER = 3; private long capacity_; /** * optional uint64 capacity = 3 [default = 0]; */ public boolean hasCapacity() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 capacity = 3 [default = 0]; */ public long getCapacity() { return capacity_; } public static final int DFSUSED_FIELD_NUMBER = 4; private long dfsUsed_; /** * optional uint64 dfsUsed = 4 [default = 0]; */ public boolean hasDfsUsed() { return ((bitField0_ & 0x00000008) != 0); } /** * optional uint64 dfsUsed = 4 [default = 0]; */ public long getDfsUsed() { return dfsUsed_; } public static final int REMAINING_FIELD_NUMBER = 5; private long 
remaining_; /** * optional uint64 remaining = 5 [default = 0]; */ public boolean hasRemaining() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 remaining = 5 [default = 0]; */ public long getRemaining() { return remaining_; } public static final int BLOCKPOOLUSED_FIELD_NUMBER = 6; private long blockPoolUsed_; /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ public boolean hasBlockPoolUsed() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ public long getBlockPoolUsed() { return blockPoolUsed_; } public static final int STORAGE_FIELD_NUMBER = 7; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_; /** *
     * supersedes StorageUuid
     * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public boolean hasStorage() { return ((bitField0_ & 0x00000040) != 0); } /** *
     * supersedes StorageUuid
     * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() { return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_; } /** *
     * supersedes StorageUuid
     * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() { return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_; } public static final int NONDFSUSED_FIELD_NUMBER = 8; private long nonDfsUsed_; /** * optional uint64 nonDfsUsed = 8; */ public boolean hasNonDfsUsed() { return ((bitField0_ & 0x00000080) != 0); } /** * optional uint64 nonDfsUsed = 8; */ public long getNonDfsUsed() { return nonDfsUsed_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasStorageUuid()) { memoizedIsInitialized = 0; return false; } if (hasStorage()) { if (!getStorage().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, storageUuid_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBool(2, failed_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, capacity_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, dfsUsed_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, remaining_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt64(6, blockPoolUsed_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeMessage(7, getStorage()); } if (((bitField0_ & 0x00000080) != 0)) { output.writeUInt64(8, nonDfsUsed_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ 
& 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, storageUuid_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(2, failed_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, capacity_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, dfsUsed_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, remaining_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(6, blockPoolUsed_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(7, getStorage()); } if (((bitField0_ & 0x00000080) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(8, nonDfsUsed_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) obj; if (hasStorageUuid() != other.hasStorageUuid()) return false; if (hasStorageUuid()) { if (!getStorageUuid() .equals(other.getStorageUuid())) return false; } if (hasFailed() != other.hasFailed()) return false; if (hasFailed()) { if (getFailed() != other.getFailed()) return false; } if (hasCapacity() != other.hasCapacity()) return false; if (hasCapacity()) { if (getCapacity() != other.getCapacity()) return false; } if 
(hasDfsUsed() != other.hasDfsUsed()) return false; if (hasDfsUsed()) { if (getDfsUsed() != other.getDfsUsed()) return false; } if (hasRemaining() != other.hasRemaining()) return false; if (hasRemaining()) { if (getRemaining() != other.getRemaining()) return false; } if (hasBlockPoolUsed() != other.hasBlockPoolUsed()) return false; if (hasBlockPoolUsed()) { if (getBlockPoolUsed() != other.getBlockPoolUsed()) return false; } if (hasStorage() != other.hasStorage()) return false; if (hasStorage()) { if (!getStorage() .equals(other.getStorage())) return false; } if (hasNonDfsUsed() != other.hasNonDfsUsed()) return false; if (hasNonDfsUsed()) { if (getNonDfsUsed() != other.getNonDfsUsed()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasStorageUuid()) { hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER; hash = (53 * hash) + getStorageUuid().hashCode(); } if (hasFailed()) { hash = (37 * hash) + FAILED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getFailed()); } if (hasCapacity()) { hash = (37 * hash) + CAPACITY_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getCapacity()); } if (hasDfsUsed()) { hash = (37 * hash) + DFSUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getDfsUsed()); } if (hasRemaining()) { hash = (37 * hash) + REMAINING_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getRemaining()); } if (hasBlockPoolUsed()) { hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getBlockPoolUsed()); } if (hasStorage()) { hash = (37 * hash) + STORAGE_FIELD_NUMBER; hash = (53 * hash) + getStorage().hashCode(); 
} if (hasNonDfsUsed()) { hash = (37 * hash) + NONDFSUSED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNonDfsUsed()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static 
Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.StorageReportProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageReportProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
.alwaysUseFieldBuilders) { getStorageFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); storageUuid_ = ""; bitField0_ = (bitField0_ & ~0x00000001); failed_ = false; bitField0_ = (bitField0_ & ~0x00000002); capacity_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); dfsUsed_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); remaining_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); blockPoolUsed_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); if (storageBuilder_ == null) { storage_ = null; } else { storageBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); nonDfsUsed_ = 0L; bitField0_ = (bitField0_ & ~0x00000080); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.storageUuid_ = storageUuid_; if (((from_bitField0_ & 0x00000002) != 0)) { result.failed_ = failed_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) 
{ result.capacity_ = capacity_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.dfsUsed_ = dfsUsed_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.remaining_ = remaining_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.blockPoolUsed_ = blockPoolUsed_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { if (storageBuilder_ == null) { result.storage_ = storage_; } else { result.storage_ = storageBuilder_.build(); } to_bitField0_ |= 0x00000040; } if (((from_bitField0_ & 0x00000080) != 0)) { result.nonDfsUsed_ = nonDfsUsed_; to_bitField0_ |= 0x00000080; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) { return 
mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance()) return this; if (other.hasStorageUuid()) { bitField0_ |= 0x00000001; storageUuid_ = other.storageUuid_; onChanged(); } if (other.hasFailed()) { setFailed(other.getFailed()); } if (other.hasCapacity()) { setCapacity(other.getCapacity()); } if (other.hasDfsUsed()) { setDfsUsed(other.getDfsUsed()); } if (other.hasRemaining()) { setRemaining(other.getRemaining()); } if (other.hasBlockPoolUsed()) { setBlockPoolUsed(other.getBlockPoolUsed()); } if (other.hasStorage()) { mergeStorage(other.getStorage()); } if (other.hasNonDfsUsed()) { setNonDfsUsed(other.getNonDfsUsed()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasStorageUuid()) { return false; } if (hasStorage()) { if (!getStorage().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object storageUuid_ = ""; /** * required string storageUuid = 1 [deprecated = true]; */ 
@java.lang.Deprecated public boolean hasStorageUuid() { return ((bitField0_ & 0x00000001) != 0); } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public java.lang.String getStorageUuid() { java.lang.Object ref = storageUuid_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { storageUuid_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidBytes() { java.lang.Object ref = storageUuid_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); storageUuid_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public Builder setStorageUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; storageUuid_ = value; onChanged(); return this; } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public Builder clearStorageUuid() { bitField0_ = (bitField0_ & ~0x00000001); storageUuid_ = getDefaultInstance().getStorageUuid(); onChanged(); return this; } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public Builder setStorageUuidBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; storageUuid_ = value; onChanged(); return this; } private boolean failed_ ; /** * optional bool failed = 2 [default = false]; */ public boolean hasFailed() { return ((bitField0_ & 0x00000002) 
!= 0); } /** * optional bool failed = 2 [default = false]; */ public boolean getFailed() { return failed_; } /** * optional bool failed = 2 [default = false]; */ public Builder setFailed(boolean value) { bitField0_ |= 0x00000002; failed_ = value; onChanged(); return this; } /** * optional bool failed = 2 [default = false]; */ public Builder clearFailed() { bitField0_ = (bitField0_ & ~0x00000002); failed_ = false; onChanged(); return this; } private long capacity_ ; /** * optional uint64 capacity = 3 [default = 0]; */ public boolean hasCapacity() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 capacity = 3 [default = 0]; */ public long getCapacity() { return capacity_; } /** * optional uint64 capacity = 3 [default = 0]; */ public Builder setCapacity(long value) { bitField0_ |= 0x00000004; capacity_ = value; onChanged(); return this; } /** * optional uint64 capacity = 3 [default = 0]; */ public Builder clearCapacity() { bitField0_ = (bitField0_ & ~0x00000004); capacity_ = 0L; onChanged(); return this; } private long dfsUsed_ ; /** * optional uint64 dfsUsed = 4 [default = 0]; */ public boolean hasDfsUsed() { return ((bitField0_ & 0x00000008) != 0); } /** * optional uint64 dfsUsed = 4 [default = 0]; */ public long getDfsUsed() { return dfsUsed_; } /** * optional uint64 dfsUsed = 4 [default = 0]; */ public Builder setDfsUsed(long value) { bitField0_ |= 0x00000008; dfsUsed_ = value; onChanged(); return this; } /** * optional uint64 dfsUsed = 4 [default = 0]; */ public Builder clearDfsUsed() { bitField0_ = (bitField0_ & ~0x00000008); dfsUsed_ = 0L; onChanged(); return this; } private long remaining_ ; /** * optional uint64 remaining = 5 [default = 0]; */ public boolean hasRemaining() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 remaining = 5 [default = 0]; */ public long getRemaining() { return remaining_; } /** * optional uint64 remaining = 5 [default = 0]; */ public Builder setRemaining(long value) { bitField0_ |= 0x00000010; 
remaining_ = value; onChanged(); return this; } /** * optional uint64 remaining = 5 [default = 0]; */ public Builder clearRemaining() { bitField0_ = (bitField0_ & ~0x00000010); remaining_ = 0L; onChanged(); return this; } private long blockPoolUsed_ ; /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ public boolean hasBlockPoolUsed() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ public long getBlockPoolUsed() { return blockPoolUsed_; } /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ public Builder setBlockPoolUsed(long value) { bitField0_ |= 0x00000020; blockPoolUsed_ = value; onChanged(); return this; } /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ public Builder clearBlockPoolUsed() { bitField0_ = (bitField0_ & ~0x00000020); blockPoolUsed_ = 0L; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> storageBuilder_; /** *
       * supersedes StorageUuid
       * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public boolean hasStorage() { return ((bitField0_ & 0x00000040) != 0); } /** *
       * supersedes StorageUuid
       * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() { if (storageBuilder_ == null) { return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_; } else { return storageBuilder_.getMessage(); } } /** *
       * supersedes StorageUuid
       * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) { if (storageBuilder_ == null) { if (value == null) { throw new NullPointerException(); } storage_ = value; onChanged(); } else { storageBuilder_.setMessage(value); } bitField0_ |= 0x00000040; return this; } /** *
       * supersedes StorageUuid
       * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public Builder setStorage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder builderForValue) { if (storageBuilder_ == null) { storage_ = builderForValue.build(); onChanged(); } else { storageBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000040; return this; } /** *
       * supersedes StorageUuid
       * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) { if (storageBuilder_ == null) { if (((bitField0_ & 0x00000040) != 0) && storage_ != null && storage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) { storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.newBuilder(storage_).mergeFrom(value).buildPartial(); } else { storage_ = value; } onChanged(); } else { storageBuilder_.mergeFrom(value); } bitField0_ |= 0x00000040; return this; } /** *
       * supersedes StorageUuid
       * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public Builder clearStorage() { if (storageBuilder_ == null) { storage_ = null; onChanged(); } else { storageBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); return this; } /** *
       * supersedes StorageUuid
       * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder getStorageBuilder() { bitField0_ |= 0x00000040; onChanged(); return getStorageFieldBuilder().getBuilder(); } /** *
       * supersedes StorageUuid
       * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() { if (storageBuilder_ != null) { return storageBuilder_.getMessageOrBuilder(); } else { return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_; } } /** *
       * supersedes StorageUuid
       * 
* * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> getStorageFieldBuilder() { if (storageBuilder_ == null) { storageBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>( getStorage(), getParentForChildren(), isClean()); storage_ = null; } return storageBuilder_; } private long nonDfsUsed_ ; /** * optional uint64 nonDfsUsed = 8; */ public boolean hasNonDfsUsed() { return ((bitField0_ & 0x00000080) != 0); } /** * optional uint64 nonDfsUsed = 8; */ public long getNonDfsUsed() { return nonDfsUsed_; } /** * optional uint64 nonDfsUsed = 8; */ public Builder setNonDfsUsed(long value) { bitField0_ |= 0x00000080; nonDfsUsed_ = value; onChanged(); return this; } /** * optional uint64 nonDfsUsed = 8; */ public Builder clearNonDfsUsed() { bitField0_ = (bitField0_ & ~0x00000080); nonDfsUsed_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageReportProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageReportProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto 
DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public StorageReportProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new StorageReportProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ContentSummaryProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ContentSummaryProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint64 length = 1; */ boolean hasLength(); /** * required uint64 length = 1; */ long getLength(); /** * required uint64 fileCount = 2; */ boolean hasFileCount(); /** * required uint64 fileCount = 2; */ long getFileCount(); /** * required uint64 directoryCount = 3; */ boolean hasDirectoryCount(); /** * required uint64 directoryCount = 3; */ long getDirectoryCount(); /** * required uint64 quota = 4; */ boolean hasQuota(); /** * required uint64 quota = 4; */ long getQuota(); /** * required uint64 spaceConsumed = 5; */ boolean hasSpaceConsumed(); /** * required uint64 spaceConsumed = 5; */ long getSpaceConsumed(); /** * required uint64 spaceQuota = 6; */ boolean hasSpaceQuota(); 
/** * required uint64 spaceQuota = 6; */ long getSpaceQuota(); /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ boolean hasTypeQuotaInfos(); /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos(); /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder(); /** * optional uint64 snapshotLength = 8; */ boolean hasSnapshotLength(); /** * optional uint64 snapshotLength = 8; */ long getSnapshotLength(); /** * optional uint64 snapshotFileCount = 9; */ boolean hasSnapshotFileCount(); /** * optional uint64 snapshotFileCount = 9; */ long getSnapshotFileCount(); /** * optional uint64 snapshotDirectoryCount = 10; */ boolean hasSnapshotDirectoryCount(); /** * optional uint64 snapshotDirectoryCount = 10; */ long getSnapshotDirectoryCount(); /** * optional uint64 snapshotSpaceConsumed = 11; */ boolean hasSnapshotSpaceConsumed(); /** * optional uint64 snapshotSpaceConsumed = 11; */ long getSnapshotSpaceConsumed(); /** * optional string erasureCodingPolicy = 12; */ boolean hasErasureCodingPolicy(); /** * optional string erasureCodingPolicy = 12; */ java.lang.String getErasureCodingPolicy(); /** * optional string erasureCodingPolicy = 12; */ org.apache.hadoop.thirdparty.protobuf.ByteString getErasureCodingPolicyBytes(); } /** *
   **
   * Summary of a file or directory
   * 
* * Protobuf type {@code hadoop.hdfs.ContentSummaryProto} */ public static final class ContentSummaryProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ContentSummaryProto) ContentSummaryProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ContentSummaryProto.newBuilder() to construct. private ContentSummaryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ContentSummaryProto() { erasureCodingPolicy_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ContentSummaryProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; length_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; fileCount_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; directoryCount_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; quota_ = input.readUInt64(); break; } case 40: { bitField0_ |= 0x00000010; spaceConsumed_ = input.readUInt64(); break; } case 48: { bitField0_ |= 0x00000020; spaceQuota_ = input.readUInt64(); break; } case 58: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder subBuilder = null; if (((bitField0_ & 0x00000040) != 0)) { subBuilder = 
typeQuotaInfos_.toBuilder(); } typeQuotaInfos_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(typeQuotaInfos_); typeQuotaInfos_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000040; break; } case 64: { bitField0_ |= 0x00000080; snapshotLength_ = input.readUInt64(); break; } case 72: { bitField0_ |= 0x00000100; snapshotFileCount_ = input.readUInt64(); break; } case 80: { bitField0_ |= 0x00000200; snapshotDirectoryCount_ = input.readUInt64(); break; } case 88: { bitField0_ |= 0x00000400; snapshotSpaceConsumed_ = input.readUInt64(); break; } case 98: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000800; erasureCodingPolicy_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class); } private int bitField0_; public static 
final int LENGTH_FIELD_NUMBER = 1; private long length_; /** * required uint64 length = 1; */ public boolean hasLength() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 length = 1; */ public long getLength() { return length_; } public static final int FILECOUNT_FIELD_NUMBER = 2; private long fileCount_; /** * required uint64 fileCount = 2; */ public boolean hasFileCount() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 fileCount = 2; */ public long getFileCount() { return fileCount_; } public static final int DIRECTORYCOUNT_FIELD_NUMBER = 3; private long directoryCount_; /** * required uint64 directoryCount = 3; */ public boolean hasDirectoryCount() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 directoryCount = 3; */ public long getDirectoryCount() { return directoryCount_; } public static final int QUOTA_FIELD_NUMBER = 4; private long quota_; /** * required uint64 quota = 4; */ public boolean hasQuota() { return ((bitField0_ & 0x00000008) != 0); } /** * required uint64 quota = 4; */ public long getQuota() { return quota_; } public static final int SPACECONSUMED_FIELD_NUMBER = 5; private long spaceConsumed_; /** * required uint64 spaceConsumed = 5; */ public boolean hasSpaceConsumed() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint64 spaceConsumed = 5; */ public long getSpaceConsumed() { return spaceConsumed_; } public static final int SPACEQUOTA_FIELD_NUMBER = 6; private long spaceQuota_; /** * required uint64 spaceQuota = 6; */ public boolean hasSpaceQuota() { return ((bitField0_ & 0x00000020) != 0); } /** * required uint64 spaceQuota = 6; */ public long getSpaceQuota() { return spaceQuota_; } public static final int TYPEQUOTAINFOS_FIELD_NUMBER = 7; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_; /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public boolean hasTypeQuotaInfos() { return ((bitField0_ & 
0x00000040) != 0); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() { return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() { return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } public static final int SNAPSHOTLENGTH_FIELD_NUMBER = 8; private long snapshotLength_; /** * optional uint64 snapshotLength = 8; */ public boolean hasSnapshotLength() { return ((bitField0_ & 0x00000080) != 0); } /** * optional uint64 snapshotLength = 8; */ public long getSnapshotLength() { return snapshotLength_; } public static final int SNAPSHOTFILECOUNT_FIELD_NUMBER = 9; private long snapshotFileCount_; /** * optional uint64 snapshotFileCount = 9; */ public boolean hasSnapshotFileCount() { return ((bitField0_ & 0x00000100) != 0); } /** * optional uint64 snapshotFileCount = 9; */ public long getSnapshotFileCount() { return snapshotFileCount_; } public static final int SNAPSHOTDIRECTORYCOUNT_FIELD_NUMBER = 10; private long snapshotDirectoryCount_; /** * optional uint64 snapshotDirectoryCount = 10; */ public boolean hasSnapshotDirectoryCount() { return ((bitField0_ & 0x00000200) != 0); } /** * optional uint64 snapshotDirectoryCount = 10; */ public long getSnapshotDirectoryCount() { return snapshotDirectoryCount_; } public static final int SNAPSHOTSPACECONSUMED_FIELD_NUMBER = 11; private long snapshotSpaceConsumed_; /** * optional uint64 snapshotSpaceConsumed = 11; */ public boolean hasSnapshotSpaceConsumed() { return ((bitField0_ & 0x00000400) != 0); } /** * optional 
uint64 snapshotSpaceConsumed = 11; */ public long getSnapshotSpaceConsumed() { return snapshotSpaceConsumed_; } public static final int ERASURECODINGPOLICY_FIELD_NUMBER = 12; private volatile java.lang.Object erasureCodingPolicy_; /** * optional string erasureCodingPolicy = 12; */ public boolean hasErasureCodingPolicy() { return ((bitField0_ & 0x00000800) != 0); } /** * optional string erasureCodingPolicy = 12; */ public java.lang.String getErasureCodingPolicy() { java.lang.Object ref = erasureCodingPolicy_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { erasureCodingPolicy_ = s; } return s; } } /** * optional string erasureCodingPolicy = 12; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getErasureCodingPolicyBytes() { java.lang.Object ref = erasureCodingPolicy_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); erasureCodingPolicy_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasLength()) { memoizedIsInitialized = 0; return false; } if (!hasFileCount()) { memoizedIsInitialized = 0; return false; } if (!hasDirectoryCount()) { memoizedIsInitialized = 0; return false; } if (!hasQuota()) { memoizedIsInitialized = 0; return false; } if (!hasSpaceConsumed()) { memoizedIsInitialized = 0; return false; } if (!hasSpaceQuota()) { memoizedIsInitialized = 0; return false; } if (hasTypeQuotaInfos()) { if (!getTypeQuotaInfos().isInitialized()) { memoizedIsInitialized = 
0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, length_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, fileCount_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, directoryCount_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, quota_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, spaceConsumed_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt64(6, spaceQuota_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeMessage(7, getTypeQuotaInfos()); } if (((bitField0_ & 0x00000080) != 0)) { output.writeUInt64(8, snapshotLength_); } if (((bitField0_ & 0x00000100) != 0)) { output.writeUInt64(9, snapshotFileCount_); } if (((bitField0_ & 0x00000200) != 0)) { output.writeUInt64(10, snapshotDirectoryCount_); } if (((bitField0_ & 0x00000400) != 0)) { output.writeUInt64(11, snapshotSpaceConsumed_); } if (((bitField0_ & 0x00000800) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 12, erasureCodingPolicy_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, length_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, fileCount_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, directoryCount_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, quota_); } if (((bitField0_ & 0x00000010) != 0)) { size 
+= org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, spaceConsumed_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(6, spaceQuota_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(7, getTypeQuotaInfos()); } if (((bitField0_ & 0x00000080) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(8, snapshotLength_); } if (((bitField0_ & 0x00000100) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(9, snapshotFileCount_); } if (((bitField0_ & 0x00000200) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(10, snapshotDirectoryCount_); } if (((bitField0_ & 0x00000400) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(11, snapshotSpaceConsumed_); } if (((bitField0_ & 0x00000800) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(12, erasureCodingPolicy_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) obj; if (hasLength() != other.hasLength()) return false; if (hasLength()) { if (getLength() != other.getLength()) return false; } if (hasFileCount() != other.hasFileCount()) return false; if (hasFileCount()) { if (getFileCount() != other.getFileCount()) return false; } if (hasDirectoryCount() != other.hasDirectoryCount()) return false; if (hasDirectoryCount()) { if (getDirectoryCount() != 
other.getDirectoryCount()) return false; } if (hasQuota() != other.hasQuota()) return false; if (hasQuota()) { if (getQuota() != other.getQuota()) return false; } if (hasSpaceConsumed() != other.hasSpaceConsumed()) return false; if (hasSpaceConsumed()) { if (getSpaceConsumed() != other.getSpaceConsumed()) return false; } if (hasSpaceQuota() != other.hasSpaceQuota()) return false; if (hasSpaceQuota()) { if (getSpaceQuota() != other.getSpaceQuota()) return false; } if (hasTypeQuotaInfos() != other.hasTypeQuotaInfos()) return false; if (hasTypeQuotaInfos()) { if (!getTypeQuotaInfos() .equals(other.getTypeQuotaInfos())) return false; } if (hasSnapshotLength() != other.hasSnapshotLength()) return false; if (hasSnapshotLength()) { if (getSnapshotLength() != other.getSnapshotLength()) return false; } if (hasSnapshotFileCount() != other.hasSnapshotFileCount()) return false; if (hasSnapshotFileCount()) { if (getSnapshotFileCount() != other.getSnapshotFileCount()) return false; } if (hasSnapshotDirectoryCount() != other.hasSnapshotDirectoryCount()) return false; if (hasSnapshotDirectoryCount()) { if (getSnapshotDirectoryCount() != other.getSnapshotDirectoryCount()) return false; } if (hasSnapshotSpaceConsumed() != other.hasSnapshotSpaceConsumed()) return false; if (hasSnapshotSpaceConsumed()) { if (getSnapshotSpaceConsumed() != other.getSnapshotSpaceConsumed()) return false; } if (hasErasureCodingPolicy() != other.hasErasureCodingPolicy()) return false; if (hasErasureCodingPolicy()) { if (!getErasureCodingPolicy() .equals(other.getErasureCodingPolicy())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasLength()) { hash = (37 * hash) + LENGTH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLength()); } if 
(hasFileCount()) { hash = (37 * hash) + FILECOUNT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFileCount()); } if (hasDirectoryCount()) { hash = (37 * hash) + DIRECTORYCOUNT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getDirectoryCount()); } if (hasQuota()) { hash = (37 * hash) + QUOTA_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getQuota()); } if (hasSpaceConsumed()) { hash = (37 * hash) + SPACECONSUMED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSpaceConsumed()); } if (hasSpaceQuota()) { hash = (37 * hash) + SPACEQUOTA_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSpaceQuota()); } if (hasTypeQuotaInfos()) { hash = (37 * hash) + TYPEQUOTAINFOS_FIELD_NUMBER; hash = (53 * hash) + getTypeQuotaInfos().hashCode(); } if (hasSnapshotLength()) { hash = (37 * hash) + SNAPSHOTLENGTH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSnapshotLength()); } if (hasSnapshotFileCount()) { hash = (37 * hash) + SNAPSHOTFILECOUNT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSnapshotFileCount()); } if (hasSnapshotDirectoryCount()) { hash = (37 * hash) + SNAPSHOTDIRECTORYCOUNT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSnapshotDirectoryCount()); } if (hasSnapshotSpaceConsumed()) { hash = (37 * hash) + SNAPSHOTSPACECONSUMED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSnapshotSpaceConsumed()); } if (hasErasureCodingPolicy()) { hash = (37 * hash) + ERASURECODINGPOLICY_FIELD_NUMBER; hash = (53 * hash) + getErasureCodingPolicy().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } 
// NOTE(review): machine-generated by protoc from hdfs.proto ("DO NOT EDIT").
// Comments here are review annotations only; to change behavior, edit the
// .proto definition and regenerate.
// Returns a Builder pre-populated from this message. Calling it on the shared
// DEFAULT_INSTANCE skips the mergeFrom copy, since merging an empty default
// yields an empty Builder anyway.
@java.lang.Override public Builder toBuilder() {
  return this == DEFAULT_INSTANCE
      ? new Builder() : new Builder().mergeFrom(this);
}
// Framework hook used by GeneratedMessageV3: creates a Builder attached to the
// given parent so nested-builder change notifications can propagate upward.
@java.lang.Override protected Builder newBuilderForType(
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
/**
     **
     * Summary of a file or directory
     * 
* * Protobuf type {@code hadoop.hdfs.ContentSummaryProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ContentSummaryProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getTypeQuotaInfosFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); length_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); fileCount_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); directoryCount_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); quota_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); spaceConsumed_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); spaceQuota_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); if (typeQuotaInfosBuilder_ == null) { typeQuotaInfos_ = null; } else { 
typeQuotaInfosBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); snapshotLength_ = 0L; bitField0_ = (bitField0_ & ~0x00000080); snapshotFileCount_ = 0L; bitField0_ = (bitField0_ & ~0x00000100); snapshotDirectoryCount_ = 0L; bitField0_ = (bitField0_ & ~0x00000200); snapshotSpaceConsumed_ = 0L; bitField0_ = (bitField0_ & ~0x00000400); erasureCodingPolicy_ = ""; bitField0_ = (bitField0_ & ~0x00000800); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.length_ = length_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.fileCount_ = fileCount_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.directoryCount_ = directoryCount_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.quota_ = quota_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { 
result.spaceConsumed_ = spaceConsumed_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.spaceQuota_ = spaceQuota_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { if (typeQuotaInfosBuilder_ == null) { result.typeQuotaInfos_ = typeQuotaInfos_; } else { result.typeQuotaInfos_ = typeQuotaInfosBuilder_.build(); } to_bitField0_ |= 0x00000040; } if (((from_bitField0_ & 0x00000080) != 0)) { result.snapshotLength_ = snapshotLength_; to_bitField0_ |= 0x00000080; } if (((from_bitField0_ & 0x00000100) != 0)) { result.snapshotFileCount_ = snapshotFileCount_; to_bitField0_ |= 0x00000100; } if (((from_bitField0_ & 0x00000200) != 0)) { result.snapshotDirectoryCount_ = snapshotDirectoryCount_; to_bitField0_ |= 0x00000200; } if (((from_bitField0_ & 0x00000400) != 0)) { result.snapshotSpaceConsumed_ = snapshotSpaceConsumed_; to_bitField0_ |= 0x00000400; } if (((from_bitField0_ & 0x00000800) != 0)) { to_bitField0_ |= 0x00000800; } result.erasureCodingPolicy_ = erasureCodingPolicy_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance()) return this; if (other.hasLength()) { setLength(other.getLength()); } if (other.hasFileCount()) { setFileCount(other.getFileCount()); } if (other.hasDirectoryCount()) { setDirectoryCount(other.getDirectoryCount()); } if (other.hasQuota()) { setQuota(other.getQuota()); } if (other.hasSpaceConsumed()) { setSpaceConsumed(other.getSpaceConsumed()); } if (other.hasSpaceQuota()) { setSpaceQuota(other.getSpaceQuota()); } if (other.hasTypeQuotaInfos()) { mergeTypeQuotaInfos(other.getTypeQuotaInfos()); } if (other.hasSnapshotLength()) { setSnapshotLength(other.getSnapshotLength()); } if (other.hasSnapshotFileCount()) { setSnapshotFileCount(other.getSnapshotFileCount()); } if (other.hasSnapshotDirectoryCount()) { setSnapshotDirectoryCount(other.getSnapshotDirectoryCount()); } if (other.hasSnapshotSpaceConsumed()) { setSnapshotSpaceConsumed(other.getSnapshotSpaceConsumed()); } if (other.hasErasureCodingPolicy()) { bitField0_ |= 0x00000800; erasureCodingPolicy_ = other.erasureCodingPolicy_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasLength()) { return false; } if (!hasFileCount()) { return false; } if (!hasDirectoryCount()) { return false; } if (!hasQuota()) { return false; } if 
(!hasSpaceConsumed()) { return false; } if (!hasSpaceQuota()) { return false; } if (hasTypeQuotaInfos()) { if (!getTypeQuotaInfos().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long length_ ; /** * required uint64 length = 1; */ public boolean hasLength() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 length = 1; */ public long getLength() { return length_; } /** * required uint64 length = 1; */ public Builder setLength(long value) { bitField0_ |= 0x00000001; length_ = value; onChanged(); return this; } /** * required uint64 length = 1; */ public Builder clearLength() { bitField0_ = (bitField0_ & ~0x00000001); length_ = 0L; onChanged(); return this; } private long fileCount_ ; /** * required uint64 fileCount = 2; */ public boolean hasFileCount() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 fileCount = 2; */ public long getFileCount() { return fileCount_; } /** * required uint64 fileCount = 2; */ public Builder setFileCount(long value) { bitField0_ |= 0x00000002; fileCount_ = value; onChanged(); return this; } /** * required uint64 fileCount = 2; */ public Builder clearFileCount() { bitField0_ = (bitField0_ & ~0x00000002); fileCount_ = 0L; onChanged(); return this; } private long directoryCount_ ; /** * 
required uint64 directoryCount = 3; */ public boolean hasDirectoryCount() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 directoryCount = 3; */ public long getDirectoryCount() { return directoryCount_; } /** * required uint64 directoryCount = 3; */ public Builder setDirectoryCount(long value) { bitField0_ |= 0x00000004; directoryCount_ = value; onChanged(); return this; } /** * required uint64 directoryCount = 3; */ public Builder clearDirectoryCount() { bitField0_ = (bitField0_ & ~0x00000004); directoryCount_ = 0L; onChanged(); return this; } private long quota_ ; /** * required uint64 quota = 4; */ public boolean hasQuota() { return ((bitField0_ & 0x00000008) != 0); } /** * required uint64 quota = 4; */ public long getQuota() { return quota_; } /** * required uint64 quota = 4; */ public Builder setQuota(long value) { bitField0_ |= 0x00000008; quota_ = value; onChanged(); return this; } /** * required uint64 quota = 4; */ public Builder clearQuota() { bitField0_ = (bitField0_ & ~0x00000008); quota_ = 0L; onChanged(); return this; } private long spaceConsumed_ ; /** * required uint64 spaceConsumed = 5; */ public boolean hasSpaceConsumed() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint64 spaceConsumed = 5; */ public long getSpaceConsumed() { return spaceConsumed_; } /** * required uint64 spaceConsumed = 5; */ public Builder setSpaceConsumed(long value) { bitField0_ |= 0x00000010; spaceConsumed_ = value; onChanged(); return this; } /** * required uint64 spaceConsumed = 5; */ public Builder clearSpaceConsumed() { bitField0_ = (bitField0_ & ~0x00000010); spaceConsumed_ = 0L; onChanged(); return this; } private long spaceQuota_ ; /** * required uint64 spaceQuota = 6; */ public boolean hasSpaceQuota() { return ((bitField0_ & 0x00000020) != 0); } /** * required uint64 spaceQuota = 6; */ public long getSpaceQuota() { return spaceQuota_; } /** * required uint64 spaceQuota = 6; */ public Builder setSpaceQuota(long value) { bitField0_ |= 
0x00000020; spaceQuota_ = value; onChanged(); return this; } /** * required uint64 spaceQuota = 6; */ public Builder clearSpaceQuota() { bitField0_ = (bitField0_ & ~0x00000020); spaceQuota_ = 0L; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> typeQuotaInfosBuilder_; /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public boolean hasTypeQuotaInfos() { return ((bitField0_ & 0x00000040) != 0); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() { if (typeQuotaInfosBuilder_ == null) { return typeQuotaInfos_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } else { return typeQuotaInfosBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public Builder setTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) { if (typeQuotaInfosBuilder_ == null) { if (value == null) { throw new NullPointerException(); } typeQuotaInfos_ = value; onChanged(); } else { typeQuotaInfosBuilder_.setMessage(value); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public Builder setTypeQuotaInfos( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder builderForValue) { if (typeQuotaInfosBuilder_ == null) { typeQuotaInfos_ = builderForValue.build(); onChanged(); } else { typeQuotaInfosBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public Builder mergeTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) { if (typeQuotaInfosBuilder_ == null) { if (((bitField0_ & 0x00000040) != 0) && typeQuotaInfos_ != null && typeQuotaInfos_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) { typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.newBuilder(typeQuotaInfos_).mergeFrom(value).buildPartial(); } else { typeQuotaInfos_ = value; } onChanged(); } else { typeQuotaInfosBuilder_.mergeFrom(value); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public Builder clearTypeQuotaInfos() { if (typeQuotaInfosBuilder_ == null) { typeQuotaInfos_ = null; onChanged(); } else { typeQuotaInfosBuilder_.clear(); } bitField0_ = (bitField0_ & 
~0x00000040); return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder getTypeQuotaInfosBuilder() { bitField0_ |= 0x00000040; onChanged(); return getTypeQuotaInfosFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() { if (typeQuotaInfosBuilder_ != null) { return typeQuotaInfosBuilder_.getMessageOrBuilder(); } else { return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> getTypeQuotaInfosFieldBuilder() { if (typeQuotaInfosBuilder_ == null) { typeQuotaInfosBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder>( getTypeQuotaInfos(), getParentForChildren(), isClean()); typeQuotaInfos_ = null; } return typeQuotaInfosBuilder_; } private long snapshotLength_ ; /** * optional uint64 snapshotLength = 8; */ public boolean hasSnapshotLength() { return ((bitField0_ & 0x00000080) != 0); } /** * optional uint64 snapshotLength = 8; */ public long getSnapshotLength() { return snapshotLength_; } /** * optional uint64 snapshotLength = 8; 
*/ public Builder setSnapshotLength(long value) { bitField0_ |= 0x00000080; snapshotLength_ = value; onChanged(); return this; } /** * optional uint64 snapshotLength = 8; */ public Builder clearSnapshotLength() { bitField0_ = (bitField0_ & ~0x00000080); snapshotLength_ = 0L; onChanged(); return this; } private long snapshotFileCount_ ; /** * optional uint64 snapshotFileCount = 9; */ public boolean hasSnapshotFileCount() { return ((bitField0_ & 0x00000100) != 0); } /** * optional uint64 snapshotFileCount = 9; */ public long getSnapshotFileCount() { return snapshotFileCount_; } /** * optional uint64 snapshotFileCount = 9; */ public Builder setSnapshotFileCount(long value) { bitField0_ |= 0x00000100; snapshotFileCount_ = value; onChanged(); return this; } /** * optional uint64 snapshotFileCount = 9; */ public Builder clearSnapshotFileCount() { bitField0_ = (bitField0_ & ~0x00000100); snapshotFileCount_ = 0L; onChanged(); return this; } private long snapshotDirectoryCount_ ; /** * optional uint64 snapshotDirectoryCount = 10; */ public boolean hasSnapshotDirectoryCount() { return ((bitField0_ & 0x00000200) != 0); } /** * optional uint64 snapshotDirectoryCount = 10; */ public long getSnapshotDirectoryCount() { return snapshotDirectoryCount_; } /** * optional uint64 snapshotDirectoryCount = 10; */ public Builder setSnapshotDirectoryCount(long value) { bitField0_ |= 0x00000200; snapshotDirectoryCount_ = value; onChanged(); return this; } /** * optional uint64 snapshotDirectoryCount = 10; */ public Builder clearSnapshotDirectoryCount() { bitField0_ = (bitField0_ & ~0x00000200); snapshotDirectoryCount_ = 0L; onChanged(); return this; } private long snapshotSpaceConsumed_ ; /** * optional uint64 snapshotSpaceConsumed = 11; */ public boolean hasSnapshotSpaceConsumed() { return ((bitField0_ & 0x00000400) != 0); } /** * optional uint64 snapshotSpaceConsumed = 11; */ public long getSnapshotSpaceConsumed() { return snapshotSpaceConsumed_; } /** * optional uint64 
snapshotSpaceConsumed = 11; */ public Builder setSnapshotSpaceConsumed(long value) { bitField0_ |= 0x00000400; snapshotSpaceConsumed_ = value; onChanged(); return this; } /** * optional uint64 snapshotSpaceConsumed = 11; */ public Builder clearSnapshotSpaceConsumed() { bitField0_ = (bitField0_ & ~0x00000400); snapshotSpaceConsumed_ = 0L; onChanged(); return this; } private java.lang.Object erasureCodingPolicy_ = ""; /** * optional string erasureCodingPolicy = 12; */ public boolean hasErasureCodingPolicy() { return ((bitField0_ & 0x00000800) != 0); } /** * optional string erasureCodingPolicy = 12; */ public java.lang.String getErasureCodingPolicy() { java.lang.Object ref = erasureCodingPolicy_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { erasureCodingPolicy_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string erasureCodingPolicy = 12; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getErasureCodingPolicyBytes() { java.lang.Object ref = erasureCodingPolicy_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); erasureCodingPolicy_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string erasureCodingPolicy = 12; */ public Builder setErasureCodingPolicy( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000800; erasureCodingPolicy_ = value; onChanged(); return this; } /** * optional string erasureCodingPolicy = 12; */ public Builder clearErasureCodingPolicy() { bitField0_ = (bitField0_ & ~0x00000800); erasureCodingPolicy_ = getDefaultInstance().getErasureCodingPolicy(); onChanged(); return this; } /** * optional string erasureCodingPolicy 
= 12; */ public Builder setErasureCodingPolicyBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000800; erasureCodingPolicy_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ContentSummaryProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ContentSummaryProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ContentSummaryProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new ContentSummaryProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface 
QuotaUsageProtoOrBuilder extends
    // @@protoc_insertion_point(interface_extends:hadoop.hdfs.QuotaUsageProto)
    org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  // NOTE(review): generated by protoc from hdfs.proto — do not hand-edit.
  // Read-only accessor contract implemented by both QuotaUsageProto and its
  // Builder. Proto2 semantics: each field pairs a presence probe (has*) with a
  // getter; the four "required" fields must be set for isInitialized() to pass.

  /** required uint64 fileAndDirectoryCount = 1; */
  boolean hasFileAndDirectoryCount();
  /** required uint64 fileAndDirectoryCount = 1; */
  long getFileAndDirectoryCount();
  /** required uint64 quota = 2; */
  boolean hasQuota();
  /** required uint64 quota = 2; */
  long getQuota();
  /** required uint64 spaceConsumed = 3; */
  boolean hasSpaceConsumed();
  /** required uint64 spaceConsumed = 3; */
  long getSpaceConsumed();
  /** required uint64 spaceQuota = 4; */
  boolean hasSpaceQuota();
  /** required uint64 spaceQuota = 4; */
  long getSpaceQuota();
  /** optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */
  boolean hasTypeQuotaInfos();
  /** optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos();
  // Or-builder variant: may return a live Builder view instead of a built message.
  /** optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder();
}
/**
   **
   * Summary of quota usage of a directory
   * 
* * Protobuf type {@code hadoop.hdfs.QuotaUsageProto} */ public static final class QuotaUsageProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.QuotaUsageProto) QuotaUsageProtoOrBuilder { private static final long serialVersionUID = 0L; // Use QuotaUsageProto.newBuilder() to construct. private QuotaUsageProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private QuotaUsageProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private QuotaUsageProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; fileAndDirectoryCount_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; quota_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; spaceConsumed_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; spaceQuota_ = input.readUInt64(); break; } case 42: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder subBuilder = null; if (((bitField0_ & 0x00000010) != 0)) { subBuilder = typeQuotaInfos_.toBuilder(); } typeQuotaInfos_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.PARSER, extensionRegistry); if (subBuilder != null) { 
subBuilder.mergeFrom(typeQuotaInfos_); typeQuotaInfos_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000010; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder.class); } private int bitField0_; public static final int FILEANDDIRECTORYCOUNT_FIELD_NUMBER = 1; private long fileAndDirectoryCount_; /** * required uint64 fileAndDirectoryCount = 1; */ public boolean hasFileAndDirectoryCount() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 fileAndDirectoryCount = 1; */ public long getFileAndDirectoryCount() { return fileAndDirectoryCount_; } public static final int QUOTA_FIELD_NUMBER = 2; private long quota_; /** * required uint64 quota = 2; */ public boolean hasQuota() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 quota = 2; */ public long getQuota() { return quota_; } public static final int SPACECONSUMED_FIELD_NUMBER = 3; private long spaceConsumed_; /** * required uint64 
spaceConsumed = 3; */ public boolean hasSpaceConsumed() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 spaceConsumed = 3; */ public long getSpaceConsumed() { return spaceConsumed_; } public static final int SPACEQUOTA_FIELD_NUMBER = 4; private long spaceQuota_; /** * required uint64 spaceQuota = 4; */ public boolean hasSpaceQuota() { return ((bitField0_ & 0x00000008) != 0); } /** * required uint64 spaceQuota = 4; */ public long getSpaceQuota() { return spaceQuota_; } public static final int TYPEQUOTAINFOS_FIELD_NUMBER = 5; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_; /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public boolean hasTypeQuotaInfos() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() { return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() { return typeQuotaInfos_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_;
  }

  // NOTE(review): generated by protoc — annotations only, do not hand-edit code.
  // Caches the isInitialized() answer: -1 = not yet computed, 0 = false, 1 = true.
  private byte memoizedIsInitialized = -1;

  // Proto2 initialization check: all "required" fields (1-4) must be present,
  // and the optional typeQuotaInfos sub-message, if set, must itself be
  // initialized. Result is memoized in memoizedIsInitialized.
  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;
    if (!hasFileAndDirectoryCount()) {
      memoizedIsInitialized = 0;
      return false;
    }
    if (!hasQuota()) {
      memoizedIsInitialized = 0;
      return false;
    }
    if (!hasSpaceConsumed()) {
      memoizedIsInitialized = 0;
      return false;
    }
    if (!hasSpaceQuota()) {
      memoizedIsInitialized = 0;
      return false;
    }
    if (hasTypeQuotaInfos()) {
      if (!getTypeQuotaInfos().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
    }
    memoizedIsInitialized = 1;
    return true;
  }

  // Serializes only the fields whose presence bits are set, in field-number
  // order, then appends any unknown fields preserved from parsing.
  @java.lang.Override
  public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
      throws java.io.IOException {
    if (((bitField0_ & 0x00000001) != 0)) {
      output.writeUInt64(1, fileAndDirectoryCount_);
    }
    if (((bitField0_ & 0x00000002) != 0)) {
      output.writeUInt64(2, quota_);
    }
    if (((bitField0_ & 0x00000004) != 0)) {
      output.writeUInt64(3, spaceConsumed_);
    }
    if (((bitField0_ & 0x00000008) != 0)) {
      output.writeUInt64(4, spaceQuota_);
    }
    if (((bitField0_ & 0x00000010) != 0)) {
      output.writeMessage(5, getTypeQuotaInfos());
    }
    unknownFields.writeTo(output);
  }

  // Lazily computes the wire size of the set fields; caches it in memoizedSize
  // (-1 means not yet computed).
  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;
    size = 0;
    if (((bitField0_ & 0x00000001) != 0)) {
      size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(1, fileAndDirectoryCount_);
    }
    if (((bitField0_ & 0x00000002) != 0)) {
      size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, quota_);
    }
    if (((bitField0_ & 0x00000004) != 0)) {
      size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, spaceConsumed_);
    }
    if (((bitField0_ & 0x00000008) != 0)) {
      size +=
org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, spaceQuota_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(5, getTypeQuotaInfos()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto) obj; if (hasFileAndDirectoryCount() != other.hasFileAndDirectoryCount()) return false; if (hasFileAndDirectoryCount()) { if (getFileAndDirectoryCount() != other.getFileAndDirectoryCount()) return false; } if (hasQuota() != other.hasQuota()) return false; if (hasQuota()) { if (getQuota() != other.getQuota()) return false; } if (hasSpaceConsumed() != other.hasSpaceConsumed()) return false; if (hasSpaceConsumed()) { if (getSpaceConsumed() != other.getSpaceConsumed()) return false; } if (hasSpaceQuota() != other.hasSpaceQuota()) return false; if (hasSpaceQuota()) { if (getSpaceQuota() != other.getSpaceQuota()) return false; } if (hasTypeQuotaInfos() != other.hasTypeQuotaInfos()) return false; if (hasTypeQuotaInfos()) { if (!getTypeQuotaInfos() .equals(other.getTypeQuotaInfos())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasFileAndDirectoryCount()) { hash = (37 * hash) + FILEANDDIRECTORYCOUNT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFileAndDirectoryCount()); } if (hasQuota()) { hash = (37 * hash) + QUOTA_FIELD_NUMBER; hash = 
(53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getQuota()); } if (hasSpaceConsumed()) { hash = (37 * hash) + SPACECONSUMED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSpaceConsumed()); } if (hasSpaceQuota()) { hash = (37 * hash) + SPACEQUOTA_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSpaceQuota()); } if (hasTypeQuotaInfos()) { hash = (37 * hash) + TYPEQUOTAINFOS_FIELD_NUMBER; hash = (53 * hash) + getTypeQuotaInfos().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream 
input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Summary of quota usage of a directory
     * 
* * Protobuf type {@code hadoop.hdfs.QuotaUsageProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.QuotaUsageProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getTypeQuotaInfosFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); fileAndDirectoryCount_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); quota_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); spaceConsumed_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); spaceQuota_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); if (typeQuotaInfosBuilder_ == null) { typeQuotaInfos_ = null; } else { typeQuotaInfosBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } @java.lang.Override public 
org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.fileAndDirectoryCount_ = fileAndDirectoryCount_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.quota_ = quota_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.spaceConsumed_ = spaceConsumed_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.spaceQuota_ = spaceQuota_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { if (typeQuotaInfosBuilder_ == null) { result.typeQuotaInfos_ = typeQuotaInfos_; } else { result.typeQuotaInfos_ = typeQuotaInfosBuilder_.build(); } to_bitField0_ |= 0x00000010; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return 
super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.getDefaultInstance()) return this; if (other.hasFileAndDirectoryCount()) { setFileAndDirectoryCount(other.getFileAndDirectoryCount()); } if (other.hasQuota()) { setQuota(other.getQuota()); } if (other.hasSpaceConsumed()) { setSpaceConsumed(other.getSpaceConsumed()); } if (other.hasSpaceQuota()) { setSpaceQuota(other.getSpaceQuota()); } if (other.hasTypeQuotaInfos()) { mergeTypeQuotaInfos(other.getTypeQuotaInfos()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasFileAndDirectoryCount()) { return false; } if (!hasQuota()) { return false; } if (!hasSpaceConsumed()) { return false; } if (!hasSpaceQuota()) { return false; } if 
(hasTypeQuotaInfos()) { if (!getTypeQuotaInfos().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long fileAndDirectoryCount_ ; /** * required uint64 fileAndDirectoryCount = 1; */ public boolean hasFileAndDirectoryCount() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 fileAndDirectoryCount = 1; */ public long getFileAndDirectoryCount() { return fileAndDirectoryCount_; } /** * required uint64 fileAndDirectoryCount = 1; */ public Builder setFileAndDirectoryCount(long value) { bitField0_ |= 0x00000001; fileAndDirectoryCount_ = value; onChanged(); return this; } /** * required uint64 fileAndDirectoryCount = 1; */ public Builder clearFileAndDirectoryCount() { bitField0_ = (bitField0_ & ~0x00000001); fileAndDirectoryCount_ = 0L; onChanged(); return this; } private long quota_ ; /** * required uint64 quota = 2; */ public boolean hasQuota() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 quota = 2; */ public long getQuota() { return quota_; } /** * required uint64 quota = 2; */ public Builder setQuota(long value) { bitField0_ |= 0x00000002; quota_ = value; onChanged(); return this; } /** * required uint64 quota = 2; */ public Builder clearQuota() { bitField0_ = (bitField0_ & ~0x00000002); quota_ = 0L; onChanged(); return this; } 
private long spaceConsumed_ ; /** * required uint64 spaceConsumed = 3; */ public boolean hasSpaceConsumed() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 spaceConsumed = 3; */ public long getSpaceConsumed() { return spaceConsumed_; } /** * required uint64 spaceConsumed = 3; */ public Builder setSpaceConsumed(long value) { bitField0_ |= 0x00000004; spaceConsumed_ = value; onChanged(); return this; } /** * required uint64 spaceConsumed = 3; */ public Builder clearSpaceConsumed() { bitField0_ = (bitField0_ & ~0x00000004); spaceConsumed_ = 0L; onChanged(); return this; } private long spaceQuota_ ; /** * required uint64 spaceQuota = 4; */ public boolean hasSpaceQuota() { return ((bitField0_ & 0x00000008) != 0); } /** * required uint64 spaceQuota = 4; */ public long getSpaceQuota() { return spaceQuota_; } /** * required uint64 spaceQuota = 4; */ public Builder setSpaceQuota(long value) { bitField0_ |= 0x00000008; spaceQuota_ = value; onChanged(); return this; } /** * required uint64 spaceQuota = 4; */ public Builder clearSpaceQuota() { bitField0_ = (bitField0_ & ~0x00000008); spaceQuota_ = 0L; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> typeQuotaInfosBuilder_; /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public boolean hasTypeQuotaInfos() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() { if (typeQuotaInfosBuilder_ == null) { return typeQuotaInfos_ == 
null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } else { return typeQuotaInfosBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public Builder setTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) { if (typeQuotaInfosBuilder_ == null) { if (value == null) { throw new NullPointerException(); } typeQuotaInfos_ = value; onChanged(); } else { typeQuotaInfosBuilder_.setMessage(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public Builder setTypeQuotaInfos( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder builderForValue) { if (typeQuotaInfosBuilder_ == null) { typeQuotaInfos_ = builderForValue.build(); onChanged(); } else { typeQuotaInfosBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public Builder mergeTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) { if (typeQuotaInfosBuilder_ == null) { if (((bitField0_ & 0x00000010) != 0) && typeQuotaInfos_ != null && typeQuotaInfos_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) { typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.newBuilder(typeQuotaInfos_).mergeFrom(value).buildPartial(); } else { typeQuotaInfos_ = value; } onChanged(); } else { typeQuotaInfosBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public Builder clearTypeQuotaInfos() { if (typeQuotaInfosBuilder_ == null) { typeQuotaInfos_ = null; onChanged(); } else { typeQuotaInfosBuilder_.clear(); } bitField0_ = 
(bitField0_ & ~0x00000010); return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder getTypeQuotaInfosBuilder() { bitField0_ |= 0x00000010; onChanged(); return getTypeQuotaInfosFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() { if (typeQuotaInfosBuilder_ != null) { return typeQuotaInfosBuilder_.getMessageOrBuilder(); } else { return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_; } } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> getTypeQuotaInfosFieldBuilder() { if (typeQuotaInfosBuilder_ == null) { typeQuotaInfosBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder>( getTypeQuotaInfos(), getParentForChildren(), isClean()); typeQuotaInfos_ = null; } return typeQuotaInfosBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final 
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.QuotaUsageProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.QuotaUsageProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public QuotaUsageProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new QuotaUsageProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface StorageTypeQuotaInfosProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageTypeQuotaInfosProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ java.util.List getTypeQuotaInfoList(); /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index); /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto 
typeQuotaInfo = 1; */ int getTypeQuotaInfoCount(); /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ java.util.List getTypeQuotaInfoOrBuilderList(); /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder( int index); } /** *
   **
   * Storage type quota and usage information of a file or directory
   * 
* * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfosProto} */ public static final class StorageTypeQuotaInfosProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageTypeQuotaInfosProto) StorageTypeQuotaInfosProtoOrBuilder { private static final long serialVersionUID = 0L; // Use StorageTypeQuotaInfosProto.newBuilder() to construct. private StorageTypeQuotaInfosProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private StorageTypeQuotaInfosProto() { typeQuotaInfo_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StorageTypeQuotaInfosProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { typeQuotaInfo_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } typeQuotaInfo_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.PARSER, extensionRegistry)); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { typeQuotaInfo_ = java.util.Collections.unmodifiableList(typeQuotaInfo_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder.class); } public static final int TYPEQUOTAINFO_FIELD_NUMBER = 1; private java.util.List typeQuotaInfo_; /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public java.util.List getTypeQuotaInfoList() { return typeQuotaInfo_; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public java.util.List getTypeQuotaInfoOrBuilderList() { return typeQuotaInfo_; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public int getTypeQuotaInfoCount() { return typeQuotaInfo_.size(); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index) { return typeQuotaInfo_.get(index); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder( int index) { return 
typeQuotaInfo_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; for (int i = 0; i < getTypeQuotaInfoCount(); i++) { if (!getTypeQuotaInfo(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < typeQuotaInfo_.size(); i++) { output.writeMessage(1, typeQuotaInfo_.get(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < typeQuotaInfo_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, typeQuotaInfo_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) obj; if (!getTypeQuotaInfoList() .equals(other.getTypeQuotaInfoList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getTypeQuotaInfoCount() > 0) { hash = (37 * hash) + TYPEQUOTAINFO_FIELD_NUMBER; hash = (53 * hash) + getTypeQuotaInfoList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return 
hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
.parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Storage type quota and usage information of a file or directory
     * 
* * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfosProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageTypeQuotaInfosProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getTypeQuotaInfoFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (typeQuotaInfoBuilder_ == null) { typeQuotaInfo_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { typeQuotaInfoBuilder_.clear(); } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto(this); int from_bitField0_ = bitField0_; if (typeQuotaInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { typeQuotaInfo_ = java.util.Collections.unmodifiableList(typeQuotaInfo_); bitField0_ = (bitField0_ & ~0x00000001); } result.typeQuotaInfo_ = typeQuotaInfo_; } else { result.typeQuotaInfo_ = typeQuotaInfoBuilder_.build(); } onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) return this; if (typeQuotaInfoBuilder_ == null) { if (!other.typeQuotaInfo_.isEmpty()) { if (typeQuotaInfo_.isEmpty()) { typeQuotaInfo_ = other.typeQuotaInfo_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.addAll(other.typeQuotaInfo_); } onChanged(); } } else { if (!other.typeQuotaInfo_.isEmpty()) { if (typeQuotaInfoBuilder_.isEmpty()) { typeQuotaInfoBuilder_.dispose(); typeQuotaInfoBuilder_ = null; typeQuotaInfo_ = other.typeQuotaInfo_; bitField0_ = (bitField0_ & ~0x00000001); typeQuotaInfoBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getTypeQuotaInfoFieldBuilder() : null; } else { typeQuotaInfoBuilder_.addAllMessages(other.typeQuotaInfo_); } } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { for (int i = 0; i < getTypeQuotaInfoCount(); i++) { if (!getTypeQuotaInfo(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.util.List typeQuotaInfo_ = java.util.Collections.emptyList(); private void ensureTypeQuotaInfoIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { typeQuotaInfo_ = new java.util.ArrayList(typeQuotaInfo_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder> typeQuotaInfoBuilder_; /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public java.util.List getTypeQuotaInfoList() { if (typeQuotaInfoBuilder_ == null) { return java.util.Collections.unmodifiableList(typeQuotaInfo_); } else { return typeQuotaInfoBuilder_.getMessageList(); } } /** * repeated 
.hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public int getTypeQuotaInfoCount() { if (typeQuotaInfoBuilder_ == null) { return typeQuotaInfo_.size(); } else { return typeQuotaInfoBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index) { if (typeQuotaInfoBuilder_ == null) { return typeQuotaInfo_.get(index); } else { return typeQuotaInfoBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder setTypeQuotaInfo( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) { if (typeQuotaInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.set(index, value); onChanged(); } else { typeQuotaInfoBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder setTypeQuotaInfo( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.set(index, builderForValue.build()); onChanged(); } else { typeQuotaInfoBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addTypeQuotaInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) { if (typeQuotaInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.add(value); onChanged(); } else { typeQuotaInfoBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addTypeQuotaInfo( int index, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) { if (typeQuotaInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.add(index, value); onChanged(); } else { typeQuotaInfoBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addTypeQuotaInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.add(builderForValue.build()); onChanged(); } else { typeQuotaInfoBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addTypeQuotaInfo( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.add(index, builderForValue.build()); onChanged(); } else { typeQuotaInfoBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addAllTypeQuotaInfo( java.lang.Iterable values) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, typeQuotaInfo_); onChanged(); } else { typeQuotaInfoBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder clearTypeQuotaInfo() { if (typeQuotaInfoBuilder_ == null) { typeQuotaInfo_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { typeQuotaInfoBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public 
Builder removeTypeQuotaInfo(int index) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.remove(index); onChanged(); } else { typeQuotaInfoBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder getTypeQuotaInfoBuilder( int index) { return getTypeQuotaInfoFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder( int index) { if (typeQuotaInfoBuilder_ == null) { return typeQuotaInfo_.get(index); } else { return typeQuotaInfoBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public java.util.List getTypeQuotaInfoOrBuilderList() { if (typeQuotaInfoBuilder_ != null) { return typeQuotaInfoBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(typeQuotaInfo_); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder addTypeQuotaInfoBuilder() { return getTypeQuotaInfoFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder addTypeQuotaInfoBuilder( int index) { return getTypeQuotaInfoFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public java.util.List getTypeQuotaInfoBuilderList() { return 
getTypeQuotaInfoFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder> getTypeQuotaInfoFieldBuilder() { if (typeQuotaInfoBuilder_ == null) { typeQuotaInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>( typeQuotaInfo_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); typeQuotaInfo_ = null; } return typeQuotaInfoBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypeQuotaInfosProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypeQuotaInfosProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override 
public StorageTypeQuotaInfosProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new StorageTypeQuotaInfosProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface StorageTypeQuotaInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageTypeQuotaInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; */ boolean hasType(); /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType(); /** * required uint64 quota = 2; */ boolean hasQuota(); /** * required uint64 quota = 2; */ long getQuota(); /** * required uint64 consumed = 3; */ boolean hasConsumed(); /** * required uint64 consumed = 3; */ long getConsumed(); } /** * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfoProto} */ public static final class StorageTypeQuotaInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageTypeQuotaInfoProto) StorageTypeQuotaInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use StorageTypeQuotaInfoProto.newBuilder() to construct. 
private StorageTypeQuotaInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private StorageTypeQuotaInfoProto() { type_ = 1; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StorageTypeQuotaInfoProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; type_ = rawValue; } break; } case 16: { bitField0_ |= 0x00000002; quota_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; consumed_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final 
org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder.class); } private int bitField0_; public static final int TYPE_FIELD_NUMBER = 1; private int type_; /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; */ public boolean hasType() { return ((bitField0_ & 0x00000001) != 0); } /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(type_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } public static final int QUOTA_FIELD_NUMBER = 2; private long quota_; /** * required uint64 quota = 2; */ public boolean hasQuota() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 quota = 2; */ public long getQuota() { return quota_; } public static final int CONSUMED_FIELD_NUMBER = 3; private long consumed_; /** * required uint64 consumed = 3; */ public boolean hasConsumed() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 consumed = 3; */ public long getConsumed() { return consumed_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasQuota()) { memoizedIsInitialized = 0; return false; } if (!hasConsumed()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, type_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, quota_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, consumed_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, type_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, quota_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, consumed_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } 
    // NOTE(review): protoc-generated code ("DO NOT EDIT") — the comments below are
    // review annotations only; for any real change, edit hdfs.proto and regenerate.

    /**
     * Value equality over type/quota/consumed plus unknown fields.
     * Proto2 semantics: for each field the presence bit must agree first,
     * and only then (when present) the value is compared.
     */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) obj;
      // Presence must match before values are compared.
      if (hasType() != other.hasType()) return false;
      if (hasType()) {
        if (type_ != other.type_) return false;
      }
      if (hasQuota() != other.hasQuota()) return false;
      if (hasQuota()) {
        if (getQuota() != other.getQuota()) return false;
      }
      if (hasConsumed() != other.hasConsumed()) return false;
      if (hasConsumed()) {
        if (getConsumed() != other.getConsumed()) return false;
      }
      if (!unknownFields.equals(other.unknownFields)) return false;
      return true;
    }

    /**
     * Hash code consistent with equals(); folds in only the fields whose
     * presence bit is set. Memoized in memoizedHashCode (0 acts as the
     * "not yet computed" sentinel).
     */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasType()) {
        hash = (37 * hash) + TYPE_FIELD_NUMBER;
        hash = (53 * hash) + type_;
      }
      if (hasQuota()) {
        hash = (37 * hash) + QUOTA_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getQuota());
      }
      if (hasConsumed()) {
        hash = (37 * hash) + CONSUMED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getConsumed());
      }
      hash = (29 * hash) + unknownFields.hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    // Static parse entry points; both delegate to PARSER.
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageTypeQuotaInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); type_ = 1; bitField0_ = (bitField0_ & ~0x00000001); quota_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); consumed_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); return this; } 
@java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.type_ = type_; if (((from_bitField0_ & 0x00000002) != 0)) { result.quota_ = quota_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.consumed_ = consumed_; to_bitField0_ |= 0x00000004; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance()) return this; if (other.hasType()) { setType(other.getType()); } if (other.hasQuota()) { setQuota(other.getQuota()); } if (other.hasConsumed()) { setConsumed(other.getConsumed()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasQuota()) { return false; } if (!hasConsumed()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = 
(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int type_ = 1; /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; */ public boolean hasType() { return ((bitField0_ & 0x00000001) != 0); } /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(type_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; */ public Builder setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; type_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK]; */ public Builder clearType() { bitField0_ = (bitField0_ & ~0x00000001); type_ = 1; onChanged(); return this; } private long quota_ ; /** * required uint64 quota = 2; */ public boolean hasQuota() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 quota = 2; */ public long getQuota() { return quota_; } /** * required uint64 quota = 2; */ public Builder setQuota(long value) { bitField0_ |= 0x00000002; quota_ = value; onChanged(); return this; } /** * required uint64 quota = 2; */ public Builder clearQuota() { bitField0_ = (bitField0_ & ~0x00000002); quota_ = 0L; onChanged(); return this; } private long consumed_ ; /** * required uint64 consumed = 3; */ public boolean hasConsumed() { return ((bitField0_ & 0x00000004) != 0); } 
/** * required uint64 consumed = 3; */ public long getConsumed() { return consumed_; } /** * required uint64 consumed = 3; */ public Builder setConsumed(long value) { bitField0_ |= 0x00000004; consumed_ = value; onChanged(); return this; } /** * required uint64 consumed = 3; */ public Builder clearConsumed() { bitField0_ = (bitField0_ & ~0x00000004); consumed_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypeQuotaInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypeQuotaInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public StorageTypeQuotaInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new StorageTypeQuotaInfoProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return 
// NOTE(review): protoc-generated code (file header: "DO NOT EDIT") — regenerate from hdfs.proto rather than
// hand-editing. This rendering also appears to have stripped generic type parameters (e.g. getFilesList()
// originally returned java.util.List<java.lang.String>) — confirm against the generated original.
PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } }
// Read-only accessor view of CorruptFileBlocksProto: repeated string files = 1, required string cookie = 2.
public interface CorruptFileBlocksProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.CorruptFileBlocksProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated string files = 1; */ java.util.List getFilesList(); /** * repeated string files = 1; */ int getFilesCount(); /** * repeated string files = 1; */ java.lang.String getFiles(int index); /** * repeated string files = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getFilesBytes(int index); /** * required string cookie = 2; */ boolean hasCookie(); /** * required string cookie = 2; */ java.lang.String getCookie(); /** * required string cookie = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getCookieBytes(); } /**
   **
   * Contains a list of paths corresponding to corrupt files and a cookie
   * used for iterative calls to NameNode.listCorruptFileBlocks.
   * 
* * Protobuf type {@code hadoop.hdfs.CorruptFileBlocksProto} */
// NOTE(review): protoc-generated message class — do not hand-edit; regenerate from hdfs.proto.
public static final class CorruptFileBlocksProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.CorruptFileBlocksProto)
CorruptFileBlocksProtoOrBuilder { private static final long serialVersionUID = 0L;
// Use CorruptFileBlocksProto.newBuilder() to construct.
private CorruptFileBlocksProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private CorruptFileBlocksProto() { files_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; cookie_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; }
// Wire-format parsing constructor: tag 10 (field 1, "files") appends a lazily-decoded string to files_;
// tag 18 (field 2, "cookie") stores the raw bytes and sets its presence bit; anything else is preserved
// in unknownFields. The finally block freezes files_ into an unmodifiable view.
private CorruptFileBlocksProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); if (!((mutable_bitField0_ & 0x00000001) != 0)) { files_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000001; } files_.add(bs); break; } case 18: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; cookie_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch 
(org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { files_ = files_.getUnmodifiableView(); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class); } private int bitField0_; public static final int FILES_FIELD_NUMBER = 1; private org.apache.hadoop.thirdparty.protobuf.LazyStringList files_; /** * repeated string files = 1; */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getFilesList() { return files_; } /** * repeated string files = 1; */ public int getFilesCount() { return files_.size(); } /** * repeated string files = 1; */ public java.lang.String getFiles(int index) { return files_.get(index); } /** * repeated string files = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getFilesBytes(int index) { return files_.getByteString(index); } public static final int COOKIE_FIELD_NUMBER = 2; private volatile java.lang.Object cookie_; /** * required string cookie = 2; */ public boolean hasCookie() { return ((bitField0_ & 0x00000001) != 0); } /** * required string cookie = 2; */ public 
java.lang.String getCookie() { java.lang.Object ref = cookie_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { cookie_ = s; } return s; } } /** * required string cookie = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getCookieBytes() { java.lang.Object ref = cookie_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); cookie_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasCookie()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < files_.size(); i++) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, files_.getRaw(i)); } if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, cookie_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; { int dataSize = 0; for (int i = 0; i < files_.size(); i++) { dataSize += computeStringSizeNoTag(files_.getRaw(i)); } size += dataSize; size += 1 * getFilesList().size(); } if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, cookie_); } size += 
unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) obj; if (!getFilesList() .equals(other.getFilesList())) return false; if (hasCookie() != other.hasCookie()) return false; if (hasCookie()) { if (!getCookie() .equals(other.getCookie())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getFilesCount() > 0) { hash = (37 * hash) + FILES_FIELD_NUMBER; hash = (53 * hash) + getFilesList().hashCode(); } if (hasCookie()) { hash = (37 * hash) + COOKIE_FIELD_NUMBER; hash = (53 * hash) + getCookie().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom( 
java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /**
     **
     * Contains a list of paths corresponding to corrupt files and a cookie
     * used for iterative calls to NameNode.listCorruptFileBlocks.
     * 
* * Protobuf type {@code hadoop.hdfs.CorruptFileBlocksProto} */
// Mutable builder for CorruptFileBlocksProto; mirrors the message fields with a bitField0_ presence mask.
public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.CorruptFileBlocksProto)
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class); }
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.newBuilder()
private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); files_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); cookie_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor; } @java.lang.Override public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((bitField0_ & 0x00000001) != 0)) { files_ = files_.getUnmodifiableView(); bitField0_ = (bitField0_ & ~0x00000001); } result.files_ = files_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000001; } result.cookie_ = cookie_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance()) return this; if (!other.files_.isEmpty()) { if (files_.isEmpty()) { files_ = other.files_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureFilesIsMutable(); files_.addAll(other.files_); } onChanged(); } if (other.hasCookie()) { bitField0_ |= 0x00000002; cookie_ = other.cookie_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasCookie()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.LazyStringList files_ = 
org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; private void ensureFilesIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { files_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(files_); bitField0_ |= 0x00000001; } } /** * repeated string files = 1; */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getFilesList() { return files_.getUnmodifiableView(); } /** * repeated string files = 1; */ public int getFilesCount() { return files_.size(); } /** * repeated string files = 1; */ public java.lang.String getFiles(int index) { return files_.get(index); } /** * repeated string files = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getFilesBytes(int index) { return files_.getByteString(index); } /** * repeated string files = 1; */ public Builder setFiles( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureFilesIsMutable(); files_.set(index, value); onChanged(); return this; } /** * repeated string files = 1; */ public Builder addFiles( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureFilesIsMutable(); files_.add(value); onChanged(); return this; } /** * repeated string files = 1; */ public Builder addAllFiles( java.lang.Iterable values) { ensureFilesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, files_); onChanged(); return this; } /** * repeated string files = 1; */ public Builder clearFiles() { files_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * repeated string files = 1; */ public Builder addFilesBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureFilesIsMutable(); files_.add(value); onChanged(); return this; } private java.lang.Object cookie_ = ""; /** * required string cookie = 2; */ public 
boolean hasCookie() { return ((bitField0_ & 0x00000002) != 0); } /** * required string cookie = 2; */ public java.lang.String getCookie() { java.lang.Object ref = cookie_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { cookie_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string cookie = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getCookieBytes() { java.lang.Object ref = cookie_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); cookie_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string cookie = 2; */ public Builder setCookie( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; cookie_ = value; onChanged(); return this; } /** * required string cookie = 2; */ public Builder clearCookie() { bitField0_ = (bitField0_ & ~0x00000002); cookie_ = getDefaultInstance().getCookie(); onChanged(); return this; } /** * required string cookie = 2; */ public Builder setCookieBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; cookie_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); }
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CorruptFileBlocksProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CorruptFileBlocksProto)
private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public CorruptFileBlocksProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new CorruptFileBlocksProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } }
// Read-only accessor view of StorageTypesProto: repeated StorageTypeProto storageTypes = 1.
public interface StorageTypesProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageTypesProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ java.util.List getStorageTypesList(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ int getStorageTypesCount(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index); } /**
   **
   * A list of storage types. 
   * 
* * Protobuf type {@code hadoop.hdfs.StorageTypesProto} */ public static final class StorageTypesProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageTypesProto) StorageTypesProtoOrBuilder { private static final long serialVersionUID = 0L; // Use StorageTypesProto.newBuilder() to construct. private StorageTypesProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private StorageTypesProto() { storageTypes_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StorageTypesProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { if (!((mutable_bitField0_ & 0x00000001) != 0)) { storageTypes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } storageTypes_.add(rawValue); } break; } case 10: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int rawValue = input.readEnum(); 
@SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { if (!((mutable_bitField0_ & 0x00000001) != 0)) { storageTypes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } storageTypes_.add(rawValue); } } input.popLimit(oldLimit); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder.class); } public static final int STORAGETYPES_FIELD_NUMBER = 1; private java.util.List storageTypes_; private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> 
storageTypes_converter_ = new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() { public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(from); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } }; /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public java.util.List getStorageTypesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_converter_.convert(storageTypes_.get(index)); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < storageTypes_.size(); i++) { output.writeEnum(1, storageTypes_.get(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; { int dataSize = 0; for (int i = 0; i < storageTypes_.size(); i++) { dataSize += 
org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSizeNoTag(storageTypes_.get(i)); } size += dataSize; size += 1 * storageTypes_.size(); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) obj; if (!storageTypes_.equals(other.storageTypes_)) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getStorageTypesCount() > 0) { hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER; hash = (53 * hash) + storageTypes_.hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * A list of storage types. 
     * 
* * Protobuf type {@code hadoop.hdfs.StorageTypesProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageTypesProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getDefaultInstanceForType() { return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto(this); int from_bitField0_ = bitField0_; if (((bitField0_ & 0x00000001) != 0)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); bitField0_ = (bitField0_ & ~0x00000001); } result.storageTypes_ = storageTypes_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other 
instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) return this; if (!other.storageTypes_.isEmpty()) { if (storageTypes_.isEmpty()) { storageTypes_ = other.storageTypes_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureStorageTypesIsMutable(); storageTypes_.addAll(other.storageTypes_); } onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.util.List storageTypes_ = java.util.Collections.emptyList(); private void ensureStorageTypesIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { storageTypes_ = new java.util.ArrayList(storageTypes_); bitField0_ |= 0x00000001; } } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public java.util.List getStorageTypesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_converter_.convert(storageTypes_.get(index)); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public Builder setStorageTypes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.set(index, value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.add(value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public Builder addAllStorageTypes( java.lang.Iterable values) { ensureStorageTypesIsMutable(); for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) { storageTypes_.add(value.getNumber()); } onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public Builder clearStorageTypes() { storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet 
unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypesProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypesProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public StorageTypesProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new StorageTypesProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface BlockStoragePolicyProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockStoragePolicyProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint32 policyId = 1; */ boolean hasPolicyId(); /** * required uint32 policyId = 1; */ int getPolicyId(); /** * required string name = 2; */ boolean hasName(); /** * required string name = 2; */ java.lang.String getName(); /** * required string name = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes(); /** *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ boolean hasCreationPolicy(); /** *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy(); /** *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder(); /** *
     * A list of storage types for creation fallback storage.
     * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ boolean hasCreationFallbackPolicy(); /** *
     * A list of storage types for creation fallback storage.
     * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy(); /** *
     * A list of storage types for creation fallback storage.
     * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder(); /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ boolean hasReplicationFallbackPolicy(); /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy(); /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder(); } /** *
   **
   * Block replica storage policy.
   * 
* * Protobuf type {@code hadoop.hdfs.BlockStoragePolicyProto} */ public static final class BlockStoragePolicyProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockStoragePolicyProto) BlockStoragePolicyProtoOrBuilder { private static final long serialVersionUID = 0L; // Use BlockStoragePolicyProto.newBuilder() to construct. private BlockStoragePolicyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private BlockStoragePolicyProto() { name_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private BlockStoragePolicyProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; policyId_ = input.readUInt32(); break; } case 18: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; name_ = bs; break; } case 26: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) != 0)) { subBuilder = creationPolicy_.toBuilder(); } creationPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(creationPolicy_); creationPolicy_ = subBuilder.buildPartial(); } 
bitField0_ |= 0x00000004; break; } case 34: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder subBuilder = null; if (((bitField0_ & 0x00000008) != 0)) { subBuilder = creationFallbackPolicy_.toBuilder(); } creationFallbackPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(creationFallbackPolicy_); creationFallbackPolicy_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } case 42: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder subBuilder = null; if (((bitField0_ & 0x00000010) != 0)) { subBuilder = replicationFallbackPolicy_.toBuilder(); } replicationFallbackPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(replicationFallbackPolicy_); replicationFallbackPolicy_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000010; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable 
.ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder.class); } private int bitField0_; public static final int POLICYID_FIELD_NUMBER = 1; private int policyId_; /** * required uint32 policyId = 1; */ public boolean hasPolicyId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint32 policyId = 1; */ public int getPolicyId() { return policyId_; } public static final int NAME_FIELD_NUMBER = 2; private volatile java.lang.Object name_; /** * required string name = 2; */ public boolean hasName() { return ((bitField0_ & 0x00000002) != 0); } /** * required string name = 2; */ public java.lang.String getName() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } } /** * required string name = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int CREATIONPOLICY_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationPolicy_; /** *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public boolean hasCreationPolicy() { return ((bitField0_ & 0x00000004) != 0); } /** *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy() { return creationPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationPolicy_; } /** *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder() { return creationPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationPolicy_; } public static final int CREATIONFALLBACKPOLICY_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationFallbackPolicy_; /** *
     * A list of storage types for creation fallback storage.
     * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public boolean hasCreationFallbackPolicy() { return ((bitField0_ & 0x00000008) != 0); } /** *
     * A list of storage types for creation fallback storage.
     * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy() { return creationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationFallbackPolicy_; } /** *
     * A list of storage types for creation fallback storage.
     * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder() { return creationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationFallbackPolicy_; } public static final int REPLICATIONFALLBACKPOLICY_FIELD_NUMBER = 5; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto replicationFallbackPolicy_; /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public boolean hasReplicationFallbackPolicy() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy() { return replicationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : replicationFallbackPolicy_; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder() { return replicationFallbackPolicy_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : replicationFallbackPolicy_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasPolicyId()) { memoizedIsInitialized = 0; return false; } if (!hasName()) { memoizedIsInitialized = 0; return false; } if (!hasCreationPolicy()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, policyId_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, name_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeMessage(3, getCreationPolicy()); } if (((bitField0_ & 0x00000008) != 0)) { output.writeMessage(4, getCreationFallbackPolicy()); } if (((bitField0_ & 0x00000010) != 0)) { output.writeMessage(5, getReplicationFallbackPolicy()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, policyId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, name_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(3, getCreationPolicy()); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, getCreationFallbackPolicy()); } if (((bitField0_ & 0x00000010) != 0)) { size += 
org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(5, getReplicationFallbackPolicy()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) obj; if (hasPolicyId() != other.hasPolicyId()) return false; if (hasPolicyId()) { if (getPolicyId() != other.getPolicyId()) return false; } if (hasName() != other.hasName()) return false; if (hasName()) { if (!getName() .equals(other.getName())) return false; } if (hasCreationPolicy() != other.hasCreationPolicy()) return false; if (hasCreationPolicy()) { if (!getCreationPolicy() .equals(other.getCreationPolicy())) return false; } if (hasCreationFallbackPolicy() != other.hasCreationFallbackPolicy()) return false; if (hasCreationFallbackPolicy()) { if (!getCreationFallbackPolicy() .equals(other.getCreationFallbackPolicy())) return false; } if (hasReplicationFallbackPolicy() != other.hasReplicationFallbackPolicy()) return false; if (hasReplicationFallbackPolicy()) { if (!getReplicationFallbackPolicy() .equals(other.getReplicationFallbackPolicy())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasPolicyId()) { hash = (37 * hash) + POLICYID_FIELD_NUMBER; hash = (53 * hash) + getPolicyId(); } if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } if (hasCreationPolicy()) { hash = (37 * hash) + CREATIONPOLICY_FIELD_NUMBER; hash = (53 * hash) + 
getCreationPolicy().hashCode(); } if (hasCreationFallbackPolicy()) { hash = (37 * hash) + CREATIONFALLBACKPOLICY_FIELD_NUMBER; hash = (53 * hash) + getCreationFallbackPolicy().hashCode(); } if (hasReplicationFallbackPolicy()) { hash = (37 * hash) + REPLICATIONFALLBACKPOLICY_FIELD_NUMBER; hash = (53 * hash) + getReplicationFallbackPolicy().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( byte[] data, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite 
extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Block replica storage policy.
     * 
* * Protobuf type {@code hadoop.hdfs.BlockStoragePolicyProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockStoragePolicyProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getCreationPolicyFieldBuilder(); getCreationFallbackPolicyFieldBuilder(); getReplicationFallbackPolicyFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); policyId_ = 0; bitField0_ = (bitField0_ & ~0x00000001); name_ = ""; bitField0_ = (bitField0_ & ~0x00000002); if (creationPolicyBuilder_ == null) { creationPolicy_ = null; } else { creationPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); if (creationFallbackPolicyBuilder_ == null) { creationFallbackPolicy_ 
= null; } else { creationFallbackPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); if (replicationFallbackPolicyBuilder_ == null) { replicationFallbackPolicy_ = null; } else { replicationFallbackPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.policyId_ = policyId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.name_ = name_; if (((from_bitField0_ & 0x00000004) != 0)) { if (creationPolicyBuilder_ == null) { result.creationPolicy_ = creationPolicy_; } else { result.creationPolicy_ = creationPolicyBuilder_.build(); } to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { if (creationFallbackPolicyBuilder_ == null) { result.creationFallbackPolicy_ = creationFallbackPolicy_; } else { 
result.creationFallbackPolicy_ = creationFallbackPolicyBuilder_.build(); } to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { if (replicationFallbackPolicyBuilder_ == null) { result.replicationFallbackPolicy_ = replicationFallbackPolicy_; } else { result.replicationFallbackPolicy_ = replicationFallbackPolicyBuilder_.build(); } to_bitField0_ |= 0x00000010; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance()) return 
this; if (other.hasPolicyId()) { setPolicyId(other.getPolicyId()); } if (other.hasName()) { bitField0_ |= 0x00000002; name_ = other.name_; onChanged(); } if (other.hasCreationPolicy()) { mergeCreationPolicy(other.getCreationPolicy()); } if (other.hasCreationFallbackPolicy()) { mergeCreationFallbackPolicy(other.getCreationFallbackPolicy()); } if (other.hasReplicationFallbackPolicy()) { mergeReplicationFallbackPolicy(other.getReplicationFallbackPolicy()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasPolicyId()) { return false; } if (!hasName()) { return false; } if (!hasCreationPolicy()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int policyId_ ; /** * required uint32 policyId = 1; */ public boolean hasPolicyId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint32 policyId = 1; */ public int getPolicyId() { return policyId_; } /** * required uint32 policyId = 1; */ public Builder setPolicyId(int value) { bitField0_ |= 0x00000001; policyId_ = value; onChanged(); return this; } /** * required uint32 policyId = 1; */ public Builder clearPolicyId() { bitField0_ = (bitField0_ & ~0x00000001); policyId_ = 0; onChanged(); return this; } private 
java.lang.Object name_ = ""; /** * required string name = 2; */ public boolean hasName() { return ((bitField0_ & 0x00000002) != 0); } /** * required string name = 2; */ public java.lang.String getName() { java.lang.Object ref = name_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string name = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string name = 2; */ public Builder setName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; name_ = value; onChanged(); return this; } /** * required string name = 2; */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000002); name_ = getDefaultInstance().getName(); onChanged(); return this; } /** * required string name = 2; */ public Builder setNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; name_ = value; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationPolicy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> creationPolicyBuilder_; /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public boolean hasCreationPolicy() { return ((bitField0_ & 0x00000004) != 0); } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy() { if (creationPolicyBuilder_ == null) { return creationPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationPolicy_; } else { return creationPolicyBuilder_.getMessage(); } } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public Builder setCreationPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (creationPolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } creationPolicy_ = value; onChanged(); } else { creationPolicyBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public Builder setCreationPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) { if (creationPolicyBuilder_ == null) { creationPolicy_ = builderForValue.build(); onChanged(); } else { creationPolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public Builder mergeCreationPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (creationPolicyBuilder_ == null) { if (((bitField0_ & 0x00000004) != 0) && creationPolicy_ != null && creationPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) { creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder(creationPolicy_).mergeFrom(value).buildPartial(); } else { creationPolicy_ = value; } onChanged(); } else { creationPolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public Builder clearCreationPolicy() { if (creationPolicyBuilder_ == null) { creationPolicy_ = null; onChanged(); } else { creationPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getCreationPolicyBuilder() { bitField0_ |= 0x00000004; onChanged(); return getCreationPolicyFieldBuilder().getBuilder(); } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder() { if (creationPolicyBuilder_ != null) { return creationPolicyBuilder_.getMessageOrBuilder(); } else { return creationPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationPolicy_; } } /** *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
* * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> getCreationPolicyFieldBuilder() { if (creationPolicyBuilder_ == null) { creationPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>( getCreationPolicy(), getParentForChildren(), isClean()); creationPolicy_ = null; } return creationPolicyBuilder_; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationFallbackPolicy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> creationFallbackPolicyBuilder_; /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public boolean hasCreationFallbackPolicy() { return ((bitField0_ & 0x00000008) != 0); } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy() { if (creationFallbackPolicyBuilder_ == null) { return creationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationFallbackPolicy_; } else { return creationFallbackPolicyBuilder_.getMessage(); } } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public Builder setCreationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (creationFallbackPolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } creationFallbackPolicy_ = value; onChanged(); } else { creationFallbackPolicyBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public Builder setCreationFallbackPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) { if (creationFallbackPolicyBuilder_ == null) { creationFallbackPolicy_ = builderForValue.build(); onChanged(); } else { creationFallbackPolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public Builder mergeCreationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (creationFallbackPolicyBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0) && creationFallbackPolicy_ != null && creationFallbackPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) { creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder(creationFallbackPolicy_).mergeFrom(value).buildPartial(); } else { creationFallbackPolicy_ = value; } onChanged(); } else { creationFallbackPolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public Builder clearCreationFallbackPolicy() { if (creationFallbackPolicyBuilder_ == null) { creationFallbackPolicy_ = null; onChanged(); } else { creationFallbackPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getCreationFallbackPolicyBuilder() { bitField0_ |= 0x00000008; onChanged(); return getCreationFallbackPolicyFieldBuilder().getBuilder(); } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder() { if (creationFallbackPolicyBuilder_ != null) { return creationFallbackPolicyBuilder_.getMessageOrBuilder(); } else { return creationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationFallbackPolicy_; } } /** *
       * A list of storage types for creation fallback storage.
       * 
* * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> getCreationFallbackPolicyFieldBuilder() { if (creationFallbackPolicyBuilder_ == null) { creationFallbackPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>( getCreationFallbackPolicy(), getParentForChildren(), isClean()); creationFallbackPolicy_ = null; } return creationFallbackPolicyBuilder_; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto replicationFallbackPolicy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> replicationFallbackPolicyBuilder_; /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public boolean hasReplicationFallbackPolicy() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy() { if (replicationFallbackPolicyBuilder_ == null) { return replicationFallbackPolicy_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : replicationFallbackPolicy_; } else { return replicationFallbackPolicyBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public Builder setReplicationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (replicationFallbackPolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } replicationFallbackPolicy_ = value; onChanged(); } else { replicationFallbackPolicyBuilder_.setMessage(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public Builder setReplicationFallbackPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) { if (replicationFallbackPolicyBuilder_ == null) { replicationFallbackPolicy_ = builderForValue.build(); onChanged(); } else { replicationFallbackPolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public Builder mergeReplicationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (replicationFallbackPolicyBuilder_ == null) { if (((bitField0_ & 0x00000010) != 0) && replicationFallbackPolicy_ != null && replicationFallbackPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) { replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder(replicationFallbackPolicy_).mergeFrom(value).buildPartial(); } else { replicationFallbackPolicy_ = value; } onChanged(); } else { replicationFallbackPolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public Builder 
clearReplicationFallbackPolicy() { if (replicationFallbackPolicyBuilder_ == null) { replicationFallbackPolicy_ = null; onChanged(); } else { replicationFallbackPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getReplicationFallbackPolicyBuilder() { bitField0_ |= 0x00000010; onChanged(); return getReplicationFallbackPolicyFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder() { if (replicationFallbackPolicyBuilder_ != null) { return replicationFallbackPolicyBuilder_.getMessageOrBuilder(); } else { return replicationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : replicationFallbackPolicy_; } } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> getReplicationFallbackPolicyFieldBuilder() { if (replicationFallbackPolicyBuilder_ == null) { replicationFallbackPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>( getReplicationFallbackPolicy(), getParentForChildren(), isClean()); replicationFallbackPolicy_ = null; } return replicationFallbackPolicyBuilder_; } @java.lang.Override 
public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockStoragePolicyProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockStoragePolicyProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public BlockStoragePolicyProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new BlockStoragePolicyProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface LocatedBlockProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.LocatedBlockProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ boolean hasB(); /** 
* required .hadoop.hdfs.ExtendedBlockProto b = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB(); /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder(); /** *
     * offset of first byte of block in the file
     * 
* * required uint64 offset = 2; */ boolean hasOffset(); /** *
     * offset of first byte of block in the file
     * 
* * required uint64 offset = 2; */ long getOffset(); /** *
     * Locations ordered by proximity to client ip
     * 
*
     * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
     */
    // Element type restored (generic argument was stripped as HTML markup in this
    // copy); locs entries are DatanodeInfoProto, per getLocs(int) below.
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getLocsList();
    /**
     *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index); /** *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ int getLocsCount(); /** *
     * Locations ordered by proximity to client ip
     * 
*
     * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
     */
    // Wildcard element type restored (stripped as HTML markup in this copy);
    // matches the protoc pattern List<? extends FooOrBuilder> for OrBuilder lists.
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getLocsOrBuilderList();
    /**
     *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder( int index); /** *
     * true if all replicas of a block are corrupt, else false
     * 
* * required bool corrupt = 4; */ boolean hasCorrupt(); /** *
     * true if all replicas of a block are corrupt, else false
     * 
* * required bool corrupt = 4; */ boolean getCorrupt(); /** * required .hadoop.common.TokenProto blockToken = 5; */ boolean hasBlockToken(); /** * required .hadoop.common.TokenProto blockToken = 5; */ org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken(); /** * required .hadoop.common.TokenProto blockToken = 5; */ org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder(); /** *
     * if a location in locs is cached
     * 
*
     * repeated bool isCached = 6 [packed = true];
     */
    // Element type restored (stripped as HTML markup in this copy); repeated bool
    // is exposed as List<Boolean>, per getIsCached(int) below.
    java.util.List<java.lang.Boolean> getIsCachedList();
    /**
     *
     * if a location in locs is cached
     * 
* * repeated bool isCached = 6 [packed = true]; */ int getIsCachedCount(); /** *
     * if a location in locs is cached
     * 
*
     * repeated bool isCached = 6 [packed = true];
     */
    boolean getIsCached(int index);
    /**
     * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
     */
    // Element types on the two List-returning getters below restored (generic
    // arguments were stripped as HTML markup in this copy).
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList();
    /**
     * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
     */
    int getStorageTypesCount();
    /**
     * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index);
    /**
     * repeated string storageIDs = 8;
     */
    java.util.List<java.lang.String> getStorageIDsList();
    /**
     * repeated string storageIDs = 8;
     */
    int getStorageIDsCount();
    /**
     * repeated string storageIDs = 8;
     */
    java.lang.String getStorageIDs(int index);
    /**
     * repeated string storageIDs = 8;
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getStorageIDsBytes(int index);
    /**
     *
     * striped block related fields
     * 
* * optional bytes blockIndices = 9; */ boolean hasBlockIndices(); /** *
     * striped block related fields
     * 
* * optional bytes blockIndices = 9; */ org.apache.hadoop.thirdparty.protobuf.ByteString getBlockIndices(); /** *
     * each internal block has a block token
     * 
*
     * repeated .hadoop.common.TokenProto blockTokens = 10;
     */
    // Element type restored (stripped as HTML markup in this copy); entries are
    // security TokenProto, per getBlockTokens(int) below.
    java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getBlockTokensList();
    /**
     *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index); /** *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ int getBlockTokensCount(); /** *
     * each internal block has a block token
     * 
*
     * repeated .hadoop.common.TokenProto blockTokens = 10;
     */
    // Wildcard element type restored (stripped as HTML markup in this copy);
    // matches the protoc pattern List<? extends FooOrBuilder> for OrBuilder lists.
    java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> getBlockTokensOrBuilderList();
    /**
     *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder( int index); } /** *
   **
   * A LocatedBlock gives information about a block and its location.
   * 
* * Protobuf type {@code hadoop.hdfs.LocatedBlockProto} */ public static final class LocatedBlockProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.LocatedBlockProto) LocatedBlockProtoOrBuilder { private static final long serialVersionUID = 0L; // Use LocatedBlockProto.newBuilder() to construct. private LocatedBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private LocatedBlockProto() { locs_ = java.util.Collections.emptyList(); isCached_ = emptyBooleanList(); storageTypes_ = java.util.Collections.emptyList(); storageIDs_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; blockIndices_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; blockTokens_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private LocatedBlockProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) != 0)) { subBuilder = b_.toBuilder(); } b_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(b_); b_ = 
subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; offset_ = input.readUInt64(); break; } case 26: { if (!((mutable_bitField0_ & 0x00000004) != 0)) { locs_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000004; } locs_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry)); break; } case 32: { bitField0_ |= 0x00000004; corrupt_ = input.readBool(); break; } case 42: { org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder subBuilder = null; if (((bitField0_ & 0x00000008) != 0)) { subBuilder = blockToken_.toBuilder(); } blockToken_ = input.readMessage(org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(blockToken_); blockToken_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } case 48: { if (!((mutable_bitField0_ & 0x00000020) != 0)) { isCached_ = newBooleanList(); mutable_bitField0_ |= 0x00000020; } isCached_.addBoolean(input.readBool()); break; } case 50: { int length = input.readRawVarint32(); int limit = input.pushLimit(length); if (!((mutable_bitField0_ & 0x00000020) != 0) && input.getBytesUntilLimit() > 0) { isCached_ = newBooleanList(); mutable_bitField0_ |= 0x00000020; } while (input.getBytesUntilLimit() > 0) { isCached_.addBoolean(input.readBool()); } input.popLimit(limit); break; } case 56: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(7, rawValue); } else { if (!((mutable_bitField0_ & 0x00000040) != 0)) { storageTypes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000040; } storageTypes_.add(rawValue); } break; } case 58: { int length = input.readRawVarint32(); int oldLimit = 
input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(7, rawValue); } else { if (!((mutable_bitField0_ & 0x00000040) != 0)) { storageTypes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000040; } storageTypes_.add(rawValue); } } input.popLimit(oldLimit); break; } case 66: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); if (!((mutable_bitField0_ & 0x00000080) != 0)) { storageIDs_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000080; } storageIDs_.add(bs); break; } case 74: { bitField0_ |= 0x00000010; blockIndices_ = input.readBytes(); break; } case 82: { if (!((mutable_bitField0_ & 0x00000200) != 0)) { blockTokens_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000200; } blockTokens_.add( input.readMessage(org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry)); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000004) != 0)) { locs_ = java.util.Collections.unmodifiableList(locs_); } if (((mutable_bitField0_ & 0x00000020) != 0)) { isCached_.makeImmutable(); // C } if (((mutable_bitField0_ & 0x00000040) != 0)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); } if (((mutable_bitField0_ & 0x00000080) != 0)) { storageIDs_ = storageIDs_.getUnmodifiableView(); } if 
(((mutable_bitField0_ & 0x00000200) != 0)) { blockTokens_ = java.util.Collections.unmodifiableList(blockTokens_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class); } private int bitField0_; public static final int B_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_; /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public boolean hasB() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() { return b_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : b_; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() { return b_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : b_; } public static final int OFFSET_FIELD_NUMBER = 2; private long offset_; /** *
     * offset of first byte of block in the file
     * 
* * required uint64 offset = 2; */ public boolean hasOffset() { return ((bitField0_ & 0x00000002) != 0); } /** *
     * offset of first byte of block in the file
     * 
*
     * required uint64 offset = 2;
     */
    public long getOffset() {
      return offset_;
    }

    public static final int LOCS_FIELD_NUMBER = 3;
    // Element type restored (generic argument was stripped as HTML markup in this
    // copy); the parsing constructor fills this with DatanodeInfoProto messages.
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> locs_;
    /**
     *
     * Locations ordered by proximity to client ip
     * 
*
     * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
     */
    // Return element type restored (stripped as HTML markup in this copy).
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getLocsList() {
      return locs_;
    }
    /**
     *
     * Locations ordered by proximity to client ip
     * 
*
     * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
     */
    // Wildcard return type restored (stripped as HTML markup in this copy).
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getLocsOrBuilderList() {
      return locs_;
    }
    /**
     *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public int getLocsCount() { return locs_.size(); } /** *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) { return locs_.get(index); } /** *
     * Locations ordered by proximity to client ip
     * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder( int index) { return locs_.get(index); } public static final int CORRUPT_FIELD_NUMBER = 4; private boolean corrupt_; /** *
     * true if all replicas of a block are corrupt, else false
     * 
* * required bool corrupt = 4; */ public boolean hasCorrupt() { return ((bitField0_ & 0x00000004) != 0); } /** *
     * true if all replicas of a block are corrupt, else false
     * 
* * required bool corrupt = 4; */ public boolean getCorrupt() { return corrupt_; } public static final int BLOCKTOKEN_FIELD_NUMBER = 5; private org.apache.hadoop.security.proto.SecurityProtos.TokenProto blockToken_; /** * required .hadoop.common.TokenProto blockToken = 5; */ public boolean hasBlockToken() { return ((bitField0_ & 0x00000008) != 0); } /** * required .hadoop.common.TokenProto blockToken = 5; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken() { return blockToken_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : blockToken_; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder() { return blockToken_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : blockToken_; } public static final int ISCACHED_FIELD_NUMBER = 6; private org.apache.hadoop.thirdparty.protobuf.Internal.BooleanList isCached_; /** *
     * if a location in locs is cached
     * 
*
     * repeated bool isCached = 6 [packed = true];
     */
    // Return element type restored (stripped as HTML markup in this copy);
    // isCached_ is an Internal.BooleanList, exposed as List<Boolean>.
    public java.util.List<java.lang.Boolean> getIsCachedList() {
      return isCached_;
    }
    /**
     *
     * if a location in locs is cached
     * 
* * repeated bool isCached = 6 [packed = true]; */ public int getIsCachedCount() { return isCached_.size(); } /** *
     * if a location in locs is cached
     * 
*
     * repeated bool isCached = 6 [packed = true];
     */
    public boolean getIsCached(int index) {
      return isCached_.getBoolean(index);
    }
    private int isCachedMemoizedSerializedSize = -1;

    public static final int STORAGETYPES_FIELD_NUMBER = 7;
    // Element type restored (stripped as HTML markup in this copy): storageTypes
    // are stored as raw enum numbers (Integer) and converted on access.
    private java.util.List<java.lang.Integer> storageTypes_;
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
        java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_converter_ =
            new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
                java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() {
              public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) {
                @SuppressWarnings("deprecation")
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(from);
                // Unknown enum numbers fall back to DISK (proto2 repeated-enum view).
                return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
              }
            };
    /**
     * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
     */
    // Return element type restored (stripped as HTML markup in this copy).
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
      return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
          java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_);
    }
    /**
     * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
     */
    public int getStorageTypesCount() {
      return storageTypes_.size();
    }
    /**
     * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
      return storageTypes_converter_.convert(storageTypes_.get(index));
    }

    public static final int STORAGEIDS_FIELD_NUMBER = 8;
    private org.apache.hadoop.thirdparty.protobuf.LazyStringList storageIDs_;
    /**
     * repeated string storageIDs = 8;
     */
    public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getStorageIDsList() {
      return storageIDs_;
} /** * repeated string storageIDs = 8; */ public int getStorageIDsCount() { return storageIDs_.size(); } /** * repeated string storageIDs = 8; */ public java.lang.String getStorageIDs(int index) { return storageIDs_.get(index); } /** * repeated string storageIDs = 8; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageIDsBytes(int index) { return storageIDs_.getByteString(index); } public static final int BLOCKINDICES_FIELD_NUMBER = 9; private org.apache.hadoop.thirdparty.protobuf.ByteString blockIndices_; /** *
     * striped block related fields
     * 
* * optional bytes blockIndices = 9; */ public boolean hasBlockIndices() { return ((bitField0_ & 0x00000010) != 0); } /** *
     * striped block related fields
     * 
*
     * optional bytes blockIndices = 9;
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockIndices() {
      return blockIndices_;
    }

    public static final int BLOCKTOKENS_FIELD_NUMBER = 10;
    // Element type restored (stripped as HTML markup in this copy); the parsing
    // constructor fills this with SecurityProtos.TokenProto messages.
    private java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> blockTokens_;
    /**
     *
     * each internal block has a block token
     * 
*
     * repeated .hadoop.common.TokenProto blockTokens = 10;
     */
    // Return element type restored (stripped as HTML markup in this copy).
    public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getBlockTokensList() {
      return blockTokens_;
    }
    /**
     *
     * each internal block has a block token
     * 
*
     * repeated .hadoop.common.TokenProto blockTokens = 10;
     */
    // Wildcard return type restored (stripped as HTML markup in this copy).
    public java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> getBlockTokensOrBuilderList() {
      return blockTokens_;
    }
    /**
     *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public int getBlockTokensCount() { return blockTokens_.size(); } /** *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) { return blockTokens_.get(index); } /** *
     * each internal block has a block token
     * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder( int index) { return blockTokens_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasB()) { memoizedIsInitialized = 0; return false; } if (!hasOffset()) { memoizedIsInitialized = 0; return false; } if (!hasCorrupt()) { memoizedIsInitialized = 0; return false; } if (!hasBlockToken()) { memoizedIsInitialized = 0; return false; } if (!getB().isInitialized()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getLocsCount(); i++) { if (!getLocs(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } if (!getBlockToken().isInitialized()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getBlockTokensCount(); i++) { if (!getBlockTokens(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getB()); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, offset_); } for (int i = 0; i < locs_.size(); i++) { output.writeMessage(3, locs_.get(i)); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBool(4, corrupt_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeMessage(5, getBlockToken()); } if (getIsCachedList().size() > 0) { output.writeUInt32NoTag(50); output.writeUInt32NoTag(isCachedMemoizedSerializedSize); } for (int i = 0; i < isCached_.size(); i++) { output.writeBoolNoTag(isCached_.getBoolean(i)); } for (int i = 0; i < storageTypes_.size(); i++) { output.writeEnum(7, storageTypes_.get(i)); } for (int i 
= 0; i < storageIDs_.size(); i++) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 8, storageIDs_.getRaw(i)); } if (((bitField0_ & 0x00000010) != 0)) { output.writeBytes(9, blockIndices_); } for (int i = 0; i < blockTokens_.size(); i++) { output.writeMessage(10, blockTokens_.get(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getB()); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, offset_); } for (int i = 0; i < locs_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(3, locs_.get(i)); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(4, corrupt_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(5, getBlockToken()); } { int dataSize = 0; dataSize = 1 * getIsCachedList().size(); size += dataSize; if (!getIsCachedList().isEmpty()) { size += 1; size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt32SizeNoTag(dataSize); } isCachedMemoizedSerializedSize = dataSize; } { int dataSize = 0; for (int i = 0; i < storageTypes_.size(); i++) { dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSizeNoTag(storageTypes_.get(i)); } size += dataSize; size += 1 * storageTypes_.size(); } { int dataSize = 0; for (int i = 0; i < storageIDs_.size(); i++) { dataSize += computeStringSizeNoTag(storageIDs_.getRaw(i)); } size += dataSize; size += 1 * getStorageIDsList().size(); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(9, 
blockIndices_); } for (int i = 0; i < blockTokens_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(10, blockTokens_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) obj; if (hasB() != other.hasB()) return false; if (hasB()) { if (!getB() .equals(other.getB())) return false; } if (hasOffset() != other.hasOffset()) return false; if (hasOffset()) { if (getOffset() != other.getOffset()) return false; } if (!getLocsList() .equals(other.getLocsList())) return false; if (hasCorrupt() != other.hasCorrupt()) return false; if (hasCorrupt()) { if (getCorrupt() != other.getCorrupt()) return false; } if (hasBlockToken() != other.hasBlockToken()) return false; if (hasBlockToken()) { if (!getBlockToken() .equals(other.getBlockToken())) return false; } if (!getIsCachedList() .equals(other.getIsCachedList())) return false; if (!storageTypes_.equals(other.storageTypes_)) return false; if (!getStorageIDsList() .equals(other.getStorageIDsList())) return false; if (hasBlockIndices() != other.hasBlockIndices()) return false; if (hasBlockIndices()) { if (!getBlockIndices() .equals(other.getBlockIndices())) return false; } if (!getBlockTokensList() .equals(other.getBlockTokensList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasB()) { hash = (37 * hash) + B_FIELD_NUMBER; hash = (53 * hash) + getB().hashCode(); } if 
(hasOffset()) { hash = (37 * hash) + OFFSET_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getOffset()); } if (getLocsCount() > 0) { hash = (37 * hash) + LOCS_FIELD_NUMBER; hash = (53 * hash) + getLocsList().hashCode(); } if (hasCorrupt()) { hash = (37 * hash) + CORRUPT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getCorrupt()); } if (hasBlockToken()) { hash = (37 * hash) + BLOCKTOKEN_FIELD_NUMBER; hash = (53 * hash) + getBlockToken().hashCode(); } if (getIsCachedCount() > 0) { hash = (37 * hash) + ISCACHED_FIELD_NUMBER; hash = (53 * hash) + getIsCachedList().hashCode(); } if (getStorageTypesCount() > 0) { hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER; hash = (53 * hash) + storageTypes_.hashCode(); } if (getStorageIDsCount() > 0) { hash = (37 * hash) + STORAGEIDS_FIELD_NUMBER; hash = (53 * hash) + getStorageIDsList().hashCode(); } if (hasBlockIndices()) { hash = (37 * hash) + BLOCKINDICES_FIELD_NUMBER; hash = (53 * hash) + getBlockIndices().hashCode(); } if (getBlockTokensCount() > 0) { hash = (37 * hash) + BLOCKTOKENS_FIELD_NUMBER; hash = (53 * hash) + getBlockTokensList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom( java.io.InputStream input, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * A LocatedBlock gives information about a block and its location.
     * 
* * Protobuf type {@code hadoop.hdfs.LocatedBlockProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.LocatedBlockProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getBFieldBuilder(); getLocsFieldBuilder(); getBlockTokenFieldBuilder(); getBlockTokensFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (bBuilder_ == null) { b_ = null; } else { bBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); offset_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); if (locsBuilder_ == null) { locs_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); } else { locsBuilder_.clear(); } corrupt_ = false; bitField0_ = (bitField0_ & ~0x00000008); if (blockTokenBuilder_ 
== null) { blockToken_ = null; } else { blockTokenBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); isCached_ = emptyBooleanList(); bitField0_ = (bitField0_ & ~0x00000020); storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000040); storageIDs_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000080); blockIndices_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000100); if (blockTokensBuilder_ == null) { blockTokens_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000200); } else { blockTokensBuilder_.clear(); } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { if (bBuilder_ == null) { result.b_ = b_; } else { result.b_ = bBuilder_.build(); } to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.offset_ = offset_; 
to_bitField0_ |= 0x00000002; } if (locsBuilder_ == null) { if (((bitField0_ & 0x00000004) != 0)) { locs_ = java.util.Collections.unmodifiableList(locs_); bitField0_ = (bitField0_ & ~0x00000004); } result.locs_ = locs_; } else { result.locs_ = locsBuilder_.build(); } if (((from_bitField0_ & 0x00000008) != 0)) { result.corrupt_ = corrupt_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000010) != 0)) { if (blockTokenBuilder_ == null) { result.blockToken_ = blockToken_; } else { result.blockToken_ = blockTokenBuilder_.build(); } to_bitField0_ |= 0x00000008; } if (((bitField0_ & 0x00000020) != 0)) { isCached_.makeImmutable(); bitField0_ = (bitField0_ & ~0x00000020); } result.isCached_ = isCached_; if (((bitField0_ & 0x00000040) != 0)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); bitField0_ = (bitField0_ & ~0x00000040); } result.storageTypes_ = storageTypes_; if (((bitField0_ & 0x00000080) != 0)) { storageIDs_ = storageIDs_.getUnmodifiableView(); bitField0_ = (bitField0_ & ~0x00000080); } result.storageIDs_ = storageIDs_; if (((from_bitField0_ & 0x00000100) != 0)) { to_bitField0_ |= 0x00000010; } result.blockIndices_ = blockIndices_; if (blockTokensBuilder_ == null) { if (((bitField0_ & 0x00000200) != 0)) { blockTokens_ = java.util.Collections.unmodifiableList(blockTokens_); bitField0_ = (bitField0_ & ~0x00000200); } result.blockTokens_ = blockTokens_; } else { result.blockTokens_ = blockTokensBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) return this; if (other.hasB()) { mergeB(other.getB()); } if (other.hasOffset()) { setOffset(other.getOffset()); } if (locsBuilder_ == null) { if (!other.locs_.isEmpty()) { if (locs_.isEmpty()) { locs_ = other.locs_; bitField0_ = (bitField0_ & ~0x00000004); } else { ensureLocsIsMutable(); locs_.addAll(other.locs_); } onChanged(); } } else { if (!other.locs_.isEmpty()) { if (locsBuilder_.isEmpty()) { locsBuilder_.dispose(); locsBuilder_ = null; locs_ = other.locs_; bitField0_ = (bitField0_ & ~0x00000004); locsBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getLocsFieldBuilder() : null; } else { locsBuilder_.addAllMessages(other.locs_); } } } if (other.hasCorrupt()) { setCorrupt(other.getCorrupt()); } if (other.hasBlockToken()) { mergeBlockToken(other.getBlockToken()); } if (!other.isCached_.isEmpty()) { if (isCached_.isEmpty()) { isCached_ = other.isCached_; bitField0_ = (bitField0_ & ~0x00000020); } else { ensureIsCachedIsMutable(); isCached_.addAll(other.isCached_); } onChanged(); } if (!other.storageTypes_.isEmpty()) { if (storageTypes_.isEmpty()) { storageTypes_ = other.storageTypes_; bitField0_ = (bitField0_ & ~0x00000040); } else { ensureStorageTypesIsMutable(); storageTypes_.addAll(other.storageTypes_); } onChanged(); } if (!other.storageIDs_.isEmpty()) { if (storageIDs_.isEmpty()) { storageIDs_ = other.storageIDs_; bitField0_ = (bitField0_ & ~0x00000080); } else { ensureStorageIDsIsMutable(); storageIDs_.addAll(other.storageIDs_); } onChanged(); } if (other.hasBlockIndices()) { setBlockIndices(other.getBlockIndices()); } if (blockTokensBuilder_ == null) { if (!other.blockTokens_.isEmpty()) { if (blockTokens_.isEmpty()) { blockTokens_ = other.blockTokens_; bitField0_ = (bitField0_ & ~0x00000200); } else { ensureBlockTokensIsMutable(); blockTokens_.addAll(other.blockTokens_); } onChanged(); } } else { if (!other.blockTokens_.isEmpty()) { if (blockTokensBuilder_.isEmpty()) { blockTokensBuilder_.dispose(); blockTokensBuilder_ = null; blockTokens_ = other.blockTokens_; bitField0_ = (bitField0_ & ~0x00000200); blockTokensBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getBlockTokensFieldBuilder() : null; } else { blockTokensBuilder_.addAllMessages(other.blockTokens_); } } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasB()) { return false; } if (!hasOffset()) { return false; } if (!hasCorrupt()) { return false; } if (!hasBlockToken()) { return false; } if (!getB().isInitialized()) { return false; } for (int i = 0; i < getLocsCount(); i++) { if (!getLocs(i).isInitialized()) { return false; } } if (!getBlockToken().isInitialized()) { return false; } for (int i = 0; i < getBlockTokensCount(); i++) { if (!getBlockTokens(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> bBuilder_; /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public boolean hasB() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() { if (bBuilder_ == null) { return b_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : b_; } else { return bBuilder_.getMessage(); } } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder setB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (bBuilder_ == null) { if (value == null) { throw new NullPointerException(); } b_ = value; onChanged(); } else { bBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder setB( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { if (bBuilder_ == null) { b_ = builderForValue.build(); onChanged(); } else { bBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder mergeB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (bBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && b_ != null && b_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(b_).mergeFrom(value).buildPartial(); } else { b_ = value; } onChanged(); } else { bBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder clearB() { if (bBuilder_ == null) { b_ = null; onChanged(); } else { bBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBBuilder() { bitField0_ |= 0x00000001; onChanged(); return getBFieldBuilder().getBuilder(); } /** * required 
.hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() { if (bBuilder_ != null) { return bBuilder_.getMessageOrBuilder(); } else { return b_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : b_; } } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> getBFieldBuilder() { if (bBuilder_ == null) { bBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( getB(), getParentForChildren(), isClean()); b_ = null; } return bBuilder_; } private long offset_ ; /** *
       * offset of first byte of block in the file
       * 
* * required uint64 offset = 2; */ public boolean hasOffset() { return ((bitField0_ & 0x00000002) != 0); } /** *
       * offset of first byte of block in the file
       * 
* * required uint64 offset = 2; */ public long getOffset() { return offset_; } /** *
       * offset of first byte of block in the file
       * 
* * required uint64 offset = 2; */ public Builder setOffset(long value) { bitField0_ |= 0x00000002; offset_ = value; onChanged(); return this; } /** *
       * offset of first byte of block in the file
       * 
* * required uint64 offset = 2; */ public Builder clearOffset() { bitField0_ = (bitField0_ & ~0x00000002); offset_ = 0L; onChanged(); return this; } private java.util.List locs_ = java.util.Collections.emptyList(); private void ensureLocsIsMutable() { if (!((bitField0_ & 0x00000004) != 0)) { locs_ = new java.util.ArrayList(locs_); bitField0_ |= 0x00000004; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> locsBuilder_; /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public java.util.List getLocsList() { if (locsBuilder_ == null) { return java.util.Collections.unmodifiableList(locs_); } else { return locsBuilder_.getMessageList(); } } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public int getLocsCount() { if (locsBuilder_ == null) { return locs_.size(); } else { return locsBuilder_.getCount(); } } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) { if (locsBuilder_ == null) { return locs_.get(index); } else { return locsBuilder_.getMessage(index); } } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder setLocs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (locsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocsIsMutable(); locs_.set(index, value); onChanged(); } else { locsBuilder_.setMessage(index, value); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder setLocs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (locsBuilder_ == null) { ensureLocsIsMutable(); locs_.set(index, builderForValue.build()); onChanged(); } else { locsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder addLocs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (locsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocsIsMutable(); locs_.add(value); onChanged(); } else { locsBuilder_.addMessage(value); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder addLocs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (locsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocsIsMutable(); locs_.add(index, value); onChanged(); } else { locsBuilder_.addMessage(index, value); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder addLocs( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (locsBuilder_ == null) { ensureLocsIsMutable(); locs_.add(builderForValue.build()); onChanged(); } else { locsBuilder_.addMessage(builderForValue.build()); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder addLocs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (locsBuilder_ == null) { ensureLocsIsMutable(); locs_.add(index, builderForValue.build()); onChanged(); } else { locsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder addAllLocs( java.lang.Iterable values) { if (locsBuilder_ == null) { ensureLocsIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, locs_); onChanged(); } else { locsBuilder_.addAllMessages(values); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder clearLocs() { if (locsBuilder_ == null) { locs_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); } else { locsBuilder_.clear(); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public Builder removeLocs(int index) { if (locsBuilder_ == null) { ensureLocsIsMutable(); locs_.remove(index); onChanged(); } else { locsBuilder_.remove(index); } return this; } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getLocsBuilder( int index) { return getLocsFieldBuilder().getBuilder(index); } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder( int index) { if (locsBuilder_ == null) { return locs_.get(index); } else { return locsBuilder_.getMessageOrBuilder(index); } } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public java.util.List getLocsOrBuilderList() { if (locsBuilder_ != null) { return locsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(locs_); } } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder() { return getLocsFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder( int index) { return getLocsFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** *
       * Locations ordered by proximity to client ip
       * 
* * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; */ public java.util.List getLocsBuilderList() { return getLocsFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getLocsFieldBuilder() { if (locsBuilder_ == null) { locsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( locs_, ((bitField0_ & 0x00000004) != 0), getParentForChildren(), isClean()); locs_ = null; } return locsBuilder_; } private boolean corrupt_ ; /** *
       * true if all replicas of a block are corrupt, else false
       * 
* * required bool corrupt = 4; */ public boolean hasCorrupt() { return ((bitField0_ & 0x00000008) != 0); } /** *
       * true if all replicas of a block are corrupt, else false
       * 
* * required bool corrupt = 4; */ public boolean getCorrupt() { return corrupt_; } /** *
       * true if all replicas of a block are corrupt, else false
       * 
* * required bool corrupt = 4; */ public Builder setCorrupt(boolean value) { bitField0_ |= 0x00000008; corrupt_ = value; onChanged(); return this; } /** *
       * true if all replicas of a block are corrupt, else false
       * 
* * required bool corrupt = 4; */ public Builder clearCorrupt() { bitField0_ = (bitField0_ & ~0x00000008); corrupt_ = false; onChanged(); return this; } private org.apache.hadoop.security.proto.SecurityProtos.TokenProto blockToken_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> blockTokenBuilder_; /** * required .hadoop.common.TokenProto blockToken = 5; */ public boolean hasBlockToken() { return ((bitField0_ & 0x00000010) != 0); } /** * required .hadoop.common.TokenProto blockToken = 5; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken() { if (blockTokenBuilder_ == null) { return blockToken_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : blockToken_; } else { return blockTokenBuilder_.getMessage(); } } /** * required .hadoop.common.TokenProto blockToken = 5; */ public Builder setBlockToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) { if (blockTokenBuilder_ == null) { if (value == null) { throw new NullPointerException(); } blockToken_ = value; onChanged(); } else { blockTokenBuilder_.setMessage(value); } bitField0_ |= 0x00000010; return this; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public Builder setBlockToken( org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) { if (blockTokenBuilder_ == null) { blockToken_ = builderForValue.build(); onChanged(); } else { blockTokenBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000010; return this; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public Builder mergeBlockToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) { if (blockTokenBuilder_ == null) { if (((bitField0_ & 0x00000010) != 0) && blockToken_ != 
null && blockToken_ != org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()) { blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.newBuilder(blockToken_).mergeFrom(value).buildPartial(); } else { blockToken_ = value; } onChanged(); } else { blockTokenBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; return this; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public Builder clearBlockToken() { if (blockTokenBuilder_ == null) { blockToken_ = null; onChanged(); } else { blockTokenBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getBlockTokenBuilder() { bitField0_ |= 0x00000010; onChanged(); return getBlockTokenFieldBuilder().getBuilder(); } /** * required .hadoop.common.TokenProto blockToken = 5; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder() { if (blockTokenBuilder_ != null) { return blockTokenBuilder_.getMessageOrBuilder(); } else { return blockToken_ == null ? 
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : blockToken_; } } /** * required .hadoop.common.TokenProto blockToken = 5; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> getBlockTokenFieldBuilder() { if (blockTokenBuilder_ == null) { blockTokenBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>( getBlockToken(), getParentForChildren(), isClean()); blockToken_ = null; } return blockTokenBuilder_; } private org.apache.hadoop.thirdparty.protobuf.Internal.BooleanList isCached_ = emptyBooleanList(); private void ensureIsCachedIsMutable() { if (!((bitField0_ & 0x00000020) != 0)) { isCached_ = mutableCopy(isCached_); bitField0_ |= 0x00000020; } } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; */ public java.util.List getIsCachedList() { return ((bitField0_ & 0x00000020) != 0) ? java.util.Collections.unmodifiableList(isCached_) : isCached_; } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; */ public int getIsCachedCount() { return isCached_.size(); } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; */ public boolean getIsCached(int index) { return isCached_.getBoolean(index); } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; */ public Builder setIsCached( int index, boolean value) { ensureIsCachedIsMutable(); isCached_.setBoolean(index, value); onChanged(); return this; } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; */ public Builder addIsCached(boolean value) { ensureIsCachedIsMutable(); isCached_.addBoolean(value); onChanged(); return this; } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; */ public Builder addAllIsCached( java.lang.Iterable values) { ensureIsCachedIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, isCached_); onChanged(); return this; } /** *
       * if a location in locs is cached
       * 
* * repeated bool isCached = 6 [packed = true]; */ public Builder clearIsCached() { isCached_ = emptyBooleanList(); bitField0_ = (bitField0_ & ~0x00000020); onChanged(); return this; } private java.util.List storageTypes_ = java.util.Collections.emptyList(); private void ensureStorageTypesIsMutable() { if (!((bitField0_ & 0x00000040) != 0)) { storageTypes_ = new java.util.ArrayList(storageTypes_); bitField0_ |= 0x00000040; } } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public java.util.List getStorageTypesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_converter_.convert(storageTypes_.get(index)); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public Builder setStorageTypes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.set(index, value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.add(value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public Builder addAllStorageTypes( java.lang.Iterable values) { ensureStorageTypesIsMutable(); for 
(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) { storageTypes_.add(value.getNumber()); } onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public Builder clearStorageTypes() { storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000040); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.LazyStringList storageIDs_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; private void ensureStorageIDsIsMutable() { if (!((bitField0_ & 0x00000080) != 0)) { storageIDs_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(storageIDs_); bitField0_ |= 0x00000080; } } /** * repeated string storageIDs = 8; */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getStorageIDsList() { return storageIDs_.getUnmodifiableView(); } /** * repeated string storageIDs = 8; */ public int getStorageIDsCount() { return storageIDs_.size(); } /** * repeated string storageIDs = 8; */ public java.lang.String getStorageIDs(int index) { return storageIDs_.get(index); } /** * repeated string storageIDs = 8; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageIDsBytes(int index) { return storageIDs_.getByteString(index); } /** * repeated string storageIDs = 8; */ public Builder setStorageIDs( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageIDsIsMutable(); storageIDs_.set(index, value); onChanged(); return this; } /** * repeated string storageIDs = 8; */ public Builder addStorageIDs( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageIDsIsMutable(); storageIDs_.add(value); onChanged(); return this; } /** * repeated string storageIDs = 8; */ public Builder addAllStorageIDs( java.lang.Iterable values) { ensureStorageIDsIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, 
storageIDs_); onChanged(); return this; } /** * repeated string storageIDs = 8; */ public Builder clearStorageIDs() { storageIDs_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000080); onChanged(); return this; } /** * repeated string storageIDs = 8; */ public Builder addStorageIDsBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureStorageIDsIsMutable(); storageIDs_.add(value); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString blockIndices_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** *
       * striped block related fields
       * 
* * optional bytes blockIndices = 9; */ public boolean hasBlockIndices() { return ((bitField0_ & 0x00000100) != 0); } /** *
       * striped block related fields
       * 
* * optional bytes blockIndices = 9; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockIndices() { return blockIndices_; } /** *
       * striped block related fields
       * 
* * optional bytes blockIndices = 9; */ public Builder setBlockIndices(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000100; blockIndices_ = value; onChanged(); return this; } /** *
       * striped block related fields
       * 
* * optional bytes blockIndices = 9; */ public Builder clearBlockIndices() { bitField0_ = (bitField0_ & ~0x00000100); blockIndices_ = getDefaultInstance().getBlockIndices(); onChanged(); return this; } private java.util.List blockTokens_ = java.util.Collections.emptyList(); private void ensureBlockTokensIsMutable() { if (!((bitField0_ & 0x00000200) != 0)) { blockTokens_ = new java.util.ArrayList(blockTokens_); bitField0_ |= 0x00000200; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> blockTokensBuilder_; /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public java.util.List getBlockTokensList() { if (blockTokensBuilder_ == null) { return java.util.Collections.unmodifiableList(blockTokens_); } else { return blockTokensBuilder_.getMessageList(); } } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public int getBlockTokensCount() { if (blockTokensBuilder_ == null) { return blockTokens_.size(); } else { return blockTokensBuilder_.getCount(); } } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) { if (blockTokensBuilder_ == null) { return blockTokens_.get(index); } else { return blockTokensBuilder_.getMessage(index); } } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder setBlockTokens( int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) { if (blockTokensBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlockTokensIsMutable(); blockTokens_.set(index, value); onChanged(); } else { blockTokensBuilder_.setMessage(index, value); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder setBlockTokens( int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) { if (blockTokensBuilder_ == null) { ensureBlockTokensIsMutable(); blockTokens_.set(index, builderForValue.build()); onChanged(); } else { blockTokensBuilder_.setMessage(index, builderForValue.build()); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder addBlockTokens(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) { if (blockTokensBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlockTokensIsMutable(); blockTokens_.add(value); onChanged(); } else { blockTokensBuilder_.addMessage(value); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder addBlockTokens( int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) { if (blockTokensBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlockTokensIsMutable(); blockTokens_.add(index, value); onChanged(); } else { blockTokensBuilder_.addMessage(index, value); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder addBlockTokens( org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) { if (blockTokensBuilder_ == null) { ensureBlockTokensIsMutable(); blockTokens_.add(builderForValue.build()); onChanged(); } else { blockTokensBuilder_.addMessage(builderForValue.build()); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder addBlockTokens( int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) { if (blockTokensBuilder_ == null) { ensureBlockTokensIsMutable(); blockTokens_.add(index, builderForValue.build()); onChanged(); } else { blockTokensBuilder_.addMessage(index, builderForValue.build()); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder addAllBlockTokens( java.lang.Iterable values) { if (blockTokensBuilder_ == null) { ensureBlockTokensIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, blockTokens_); onChanged(); } else { blockTokensBuilder_.addAllMessages(values); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder clearBlockTokens() { if (blockTokensBuilder_ == null) { blockTokens_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000200); onChanged(); } else { blockTokensBuilder_.clear(); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public Builder removeBlockTokens(int index) { if (blockTokensBuilder_ == null) { ensureBlockTokensIsMutable(); blockTokens_.remove(index); onChanged(); } else { blockTokensBuilder_.remove(index); } return this; } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getBlockTokensBuilder( int index) { return getBlockTokensFieldBuilder().getBuilder(index); } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder( int index) { if (blockTokensBuilder_ == null) { return blockTokens_.get(index); } else { return blockTokensBuilder_.getMessageOrBuilder(index); } } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public java.util.List getBlockTokensOrBuilderList() { if (blockTokensBuilder_ != null) { return blockTokensBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(blockTokens_); } } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder() { return getBlockTokensFieldBuilder().addBuilder( org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()); } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder( int index) { return getBlockTokensFieldBuilder().addBuilder( index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()); } /** *
       * each internal block has a block token
       * 
* * repeated .hadoop.common.TokenProto blockTokens = 10; */ public java.util.List getBlockTokensBuilderList() { return getBlockTokensFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> getBlockTokensFieldBuilder() { if (blockTokensBuilder_ == null) { blockTokensBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>( blockTokens_, ((bitField0_ & 0x00000200) != 0), getParentForChildren(), isClean()); blockTokens_ = null; } return blockTokensBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.LocatedBlockProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.LocatedBlockProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public LocatedBlockProto 
parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new LocatedBlockProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface BatchedListingKeyProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BatchedListingKeyProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required bytes checksum = 1; */ boolean hasChecksum(); /** * required bytes checksum = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getChecksum(); /** * required uint32 pathIndex = 2; */ boolean hasPathIndex(); /** * required uint32 pathIndex = 2; */ int getPathIndex(); /** * required bytes startAfter = 3; */ boolean hasStartAfter(); /** * required bytes startAfter = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getStartAfter(); } /** * Protobuf type {@code hadoop.hdfs.BatchedListingKeyProto} */ public static final class BatchedListingKeyProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.BatchedListingKeyProto) BatchedListingKeyProtoOrBuilder { private static final long serialVersionUID = 0L; // Use BatchedListingKeyProto.newBuilder() to construct. 
// --- BatchedListingKeyProto: constructors, wire parsing, and descriptor access ---
// NOTE(review): generated protobuf code (see file header "DO NOT EDIT"); comments added for
// review only — regenerate from hdfs.proto rather than hand-editing.

// Constructor used by Builder.buildPartial(); the builder supplies all field values.
private BatchedListingKeyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); }
// No-arg constructor: the two bytes fields default to empty; pathIndex_ defaults to 0 implicitly.
private BatchedListingKeyProto() { checksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; startAfter_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; }
// Fields present on the wire but unknown to this schema version.
@java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; }
// Wire-format parsing constructor (legacy protobuf-java codegen style, invoked via PARSER).
// Tag 10 = field 1 (checksum, length-delimited); tag 16 = field 2 (pathIndex, varint);
// tag 26 = field 3 (startAfter, length-delimited); tag 0 = end of input.
// mutable_bitField0_ is an unused codegen artifact here (no repeated fields in this message).
private BatchedListingKeyProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { bitField0_ |= 0x00000001; checksum_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; pathIndex_ = input.readUInt32(); break; } case 26: { bitField0_ |= 0x00000004; startAfter_ = input.readBytes(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } }
// Message descriptor for hadoop.hdfs.BatchedListingKeyProto, generated from hdfs.proto.
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor; }
// Reflection wiring: maps descriptor fields onto this class and its Builder.
@java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.Builder.class); }
// Presence bitmap: bit 0 = checksum, bit 1 = pathIndex, bit 2 = startAfter.
private int bitField0_;
// Field 1: required bytes checksum.
public static final int CHECKSUM_FIELD_NUMBER = 1; private org.apache.hadoop.thirdparty.protobuf.ByteString checksum_; /** * required bytes checksum = 1; */ public boolean hasChecksum() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes checksum = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getChecksum() { return checksum_; }
// Field 2: required uint32 pathIndex.
public static final int PATHINDEX_FIELD_NUMBER = 2; private int pathIndex_; /** * required uint32 pathIndex = 2; */ public boolean hasPathIndex() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 pathIndex = 2; */ public int getPathIndex() { return pathIndex_; }
// Field 3: required bytes startAfter.
public static final int STARTAFTER_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString startAfter_; /** * required bytes startAfter = 3; */ public boolean hasStartAfter() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes startAfter = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStartAfter() { return startAfter_; }
// Memoized isInitialized result: -1 unknown, 0 false, 1 true. All three fields are
// "required", so all must be present for the message to be initialized.
private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasChecksum()) { memoizedIsInitialized = 0; return false; } if (!hasPathIndex()) { memoizedIsInitialized = 0; return false; } if (!hasStartAfter()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; }
// (writeTo signature continues on the next original line.)
@java.lang.Override public void 
// Serializes only fields whose presence bit is set; unknown fields are appended last.
writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeBytes(1, checksum_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, pathIndex_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBytes(3, startAfter_); } unknownFields.writeTo(output); }
// Memoized serialized size (memoizedSize == -1 means "not yet computed").
@java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(1, checksum_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, pathIndex_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, startAfter_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; }
// Structural equality: presence flags, field values, and unknown fields must all match.
@java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto) obj; if (hasChecksum() != other.hasChecksum()) return false; if (hasChecksum()) { if (!getChecksum() .equals(other.getChecksum())) return false; } if (hasPathIndex() != other.hasPathIndex()) return false; if (hasPathIndex()) { if (getPathIndex() != other.getPathIndex()) return false; } if (hasStartAfter() != other.hasStartAfter()) return false; if (hasStartAfter()) { if (!getStartAfter() .equals(other.getStartAfter())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; }
// Memoized hash code, consistent with equals() above (body continues on the next original line).
@java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; 
// hashCode continuation: folds descriptor, each present field, and unknown fields into the hash.
hash = (19 * hash) + getDescriptor().hashCode(); if (hasChecksum()) { hash = (37 * hash) + CHECKSUM_FIELD_NUMBER; hash = (53 * hash) + getChecksum().hashCode(); } if (hasPathIndex()) { hash = (37 * hash) + PATHINDEX_FIELD_NUMBER; hash = (53 * hash) + getPathIndex(); } if (hasStartAfter()) { hash = (37 * hash) + STARTAFTER_FIELD_NUMBER; hash = (53 * hash) + getStartAfter().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; }
// Standard generated parseFrom overloads: ByteBuffer / ByteString / byte[] sources,
// each with and without an ExtensionRegistryLite. All delegate to PARSER.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( byte[] data, 
// Continuation of parseFrom(byte[], registry), then stream-based overloads:
// InputStream / delimited InputStream / CodedInputStream, with and without a registry.
// The stream variants route through GeneratedMessageV3 helpers that convert
// InvalidProtocolBufferException wrapping into plain IOException handling.
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) 
// Tail of parseFrom(CodedInputStream, registry), then builder factory methods.
throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); }
// newBuilder()/toBuilder(): builders seeded from DEFAULT_INSTANCE or from this message;
// toBuilder() avoids a needless mergeFrom when called on the default instance itself.
@java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; }
// Nested Builder for hadoop.hdfs.BatchedListingKeyProto; mirrors the message's
// descriptor/accessor-table wiring above.
/** * Protobuf type {@code hadoop.hdfs.BatchedListingKeyProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BatchedListingKeyProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.newBuilder() private Builder() { 
// Builder constructors; maybeForceBuilderInitialization() is a codegen hook that is a
// no-op here (this message has no sub-message fields needing eager field builders).
maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } }
// Resets every field to its default and clears all presence bits.
@java.lang.Override public Builder clear() { super.clear(); checksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); pathIndex_ = 0; bitField0_ = (bitField0_ & ~0x00000002); startAfter_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.getDefaultInstance(); }
// build() enforces required-field initialization; buildPartial() does not.
@java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; }
// buildPartial(): copies builder state into a new message. Note that checksum_ (and, on the
// next original line, startAfter_) are assigned unconditionally — only the presence bits
// are conditional — which is standard for this codegen version.
@java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.checksum_ = checksum_; if (((from_bitField0_ & 0x00000002) != 0)) { result.pathIndex_ = pathIndex_; to_bitField0_ |= 0x00000002; 
// buildPartial continuation: startAfter presence bit + unconditional value copy, then finalize.
} if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.startAfter_ = startAfter_; result.bitField0_ = to_bitField0_; onBuilt(); return result; }
// Reflection-based mutators simply delegate to the superclass implementations.
@java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); }
// mergeFrom(Message): narrows to the typed overload when possible.
@java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto)other); } else { super.mergeFrom(other); return this; } }
// Typed merge: only fields present in `other` overwrite this builder's values.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.getDefaultInstance()) return this; if (other.hasChecksum()) { setChecksum(other.getChecksum()); } if (other.hasPathIndex()) { setPathIndex(other.getPathIndex()); } if (other.hasStartAfter()) { setStartAfter(other.getStartAfter()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } 
// Builder-side isInitialized: all three required fields must be present.
@java.lang.Override public final boolean isInitialized() { if (!hasChecksum()) { return false; } if (!hasPathIndex()) { return false; } if (!hasStartAfter()) { return false; } return true; }
// Stream merge: parses via PARSER; on failure, any partially parsed message is still
// merged in the finally block before the IOException is rethrown.
@java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; }
// Builder-local presence bitmap (same bit layout as the message's bitField0_).
private int bitField0_;
// Field 1 (checksum) builder storage and accessors; setter rejects null.
private org.apache.hadoop.thirdparty.protobuf.ByteString checksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes checksum = 1; */ public boolean hasChecksum() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes checksum = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getChecksum() { return checksum_; } /** * required bytes checksum = 1; */ public Builder setChecksum(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; checksum_ = value; onChanged(); return this; } /** * required bytes checksum = 1; */ public Builder clearChecksum() { bitField0_ = (bitField0_ & ~0x00000001); checksum_ = getDefaultInstance().getChecksum(); onChanged(); return this; }
// Field 2 (pathIndex) builder storage and accessors; setter continues on the next original line.
private int pathIndex_ ; /** * required uint32 pathIndex = 2; */ public boolean hasPathIndex() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 pathIndex = 2; */ public int getPathIndex() { return pathIndex_; } /** * required uint32 pathIndex = 2; */ public 
// Continuation of the pathIndex setter, then its clear method.
Builder setPathIndex(int value) { bitField0_ |= 0x00000002; pathIndex_ = value; onChanged(); return this; } /** * required uint32 pathIndex = 2; */ public Builder clearPathIndex() { bitField0_ = (bitField0_ & ~0x00000002); pathIndex_ = 0; onChanged(); return this; }
// Field 3 (startAfter) builder storage and accessors; setter rejects null, clear restores
// the default instance's value.
private org.apache.hadoop.thirdparty.protobuf.ByteString startAfter_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes startAfter = 3; */ public boolean hasStartAfter() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes startAfter = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStartAfter() { return startAfter_; } /** * required bytes startAfter = 3; */ public Builder setStartAfter(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; startAfter_ = value; onChanged(); return this; } /** * required bytes startAfter = 3; */ public Builder clearStartAfter() { bitField0_ = (bitField0_ & ~0x00000004); startAfter_ = getDefaultInstance().getStartAfter(); onChanged(); return this; }
// Unknown-field passthroughs, then the end of the Builder class.
@java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BatchedListingKeyProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BatchedListingKeyProto)
// Singleton default instance shared by getDefaultInstance()/newBuilder().
private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto getDefaultInstance() { return DEFAULT_INSTANCE; } 
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public BatchedListingKeyProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new BatchedListingKeyProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DataEncryptionKeyProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DataEncryptionKeyProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint32 keyId = 1; */ boolean hasKeyId(); /** * required uint32 keyId = 1; */ int getKeyId(); /** * required string blockPoolId = 2; */ boolean hasBlockPoolId(); /** * required string blockPoolId = 2; */ java.lang.String getBlockPoolId(); /** * required string blockPoolId = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes(); /** * required bytes nonce = 3; */ boolean hasNonce(); /** * required bytes nonce = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getNonce(); /** * required bytes encryptionKey = 4; */ boolean hasEncryptionKey(); /** * required bytes encryptionKey = 4; */ org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionKey(); /** * required uint64 expiryDate = 5; */ boolean hasExpiryDate(); /** * required uint64 expiryDate = 5; */ long getExpiryDate(); /** * optional string encryptionAlgorithm = 6; */ boolean hasEncryptionAlgorithm(); /** * optional string 
encryptionAlgorithm = 6; */ java.lang.String getEncryptionAlgorithm(); /** * optional string encryptionAlgorithm = 6; */ org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionAlgorithmBytes(); } /** * Protobuf type {@code hadoop.hdfs.DataEncryptionKeyProto} */ public static final class DataEncryptionKeyProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DataEncryptionKeyProto) DataEncryptionKeyProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DataEncryptionKeyProto.newBuilder() to construct. private DataEncryptionKeyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DataEncryptionKeyProto() { blockPoolId_ = ""; nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; encryptionKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; encryptionAlgorithm_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DataEncryptionKeyProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; keyId_ = input.readUInt32(); break; } case 18: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; blockPoolId_ = bs; break; } case 26: { bitField0_ |= 0x00000004; nonce_ = 
input.readBytes(); break; } case 34: { bitField0_ |= 0x00000008; encryptionKey_ = input.readBytes(); break; } case 40: { bitField0_ |= 0x00000010; expiryDate_ = input.readUInt64(); break; } case 50: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000020; encryptionAlgorithm_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder.class); } private int bitField0_; public static final int KEYID_FIELD_NUMBER = 1; private int keyId_; /** * required uint32 keyId = 1; */ public boolean hasKeyId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint32 keyId = 1; */ public int getKeyId() { return keyId_; } public static final int BLOCKPOOLID_FIELD_NUMBER = 2; private volatile java.lang.Object blockPoolId_; /** * required string blockPoolId = 2; */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000002) != 0); } /** * required 
string blockPoolId = 2; */ public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } } /** * required string blockPoolId = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int NONCE_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString nonce_; /** * required bytes nonce = 3; */ public boolean hasNonce() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes nonce = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getNonce() { return nonce_; } public static final int ENCRYPTIONKEY_FIELD_NUMBER = 4; private org.apache.hadoop.thirdparty.protobuf.ByteString encryptionKey_; /** * required bytes encryptionKey = 4; */ public boolean hasEncryptionKey() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes encryptionKey = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionKey() { return encryptionKey_; } public static final int EXPIRYDATE_FIELD_NUMBER = 5; private long expiryDate_; /** * required uint64 expiryDate = 5; */ public boolean hasExpiryDate() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint64 expiryDate = 5; */ public long getExpiryDate() { return expiryDate_; } public static final int ENCRYPTIONALGORITHM_FIELD_NUMBER = 6; private volatile java.lang.Object encryptionAlgorithm_; /** * optional 
string encryptionAlgorithm = 6; */ public boolean hasEncryptionAlgorithm() { return ((bitField0_ & 0x00000020) != 0); } /** * optional string encryptionAlgorithm = 6; */ public java.lang.String getEncryptionAlgorithm() { java.lang.Object ref = encryptionAlgorithm_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { encryptionAlgorithm_ = s; } return s; } } /** * optional string encryptionAlgorithm = 6; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionAlgorithmBytes() { java.lang.Object ref = encryptionAlgorithm_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); encryptionAlgorithm_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasKeyId()) { memoizedIsInitialized = 0; return false; } if (!hasBlockPoolId()) { memoizedIsInitialized = 0; return false; } if (!hasNonce()) { memoizedIsInitialized = 0; return false; } if (!hasEncryptionKey()) { memoizedIsInitialized = 0; return false; } if (!hasExpiryDate()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, keyId_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, blockPoolId_); } if (((bitField0_ & 0x00000004) 
!= 0)) { output.writeBytes(3, nonce_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBytes(4, encryptionKey_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, expiryDate_); } if (((bitField0_ & 0x00000020) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, encryptionAlgorithm_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, keyId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, blockPoolId_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, nonce_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(4, encryptionKey_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, expiryDate_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, encryptionAlgorithm_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) obj; if (hasKeyId() != other.hasKeyId()) return false; if (hasKeyId()) { if (getKeyId() != other.getKeyId()) return false; } if (hasBlockPoolId() != other.hasBlockPoolId()) return false; 
if (hasBlockPoolId()) { if (!getBlockPoolId() .equals(other.getBlockPoolId())) return false; } if (hasNonce() != other.hasNonce()) return false; if (hasNonce()) { if (!getNonce() .equals(other.getNonce())) return false; } if (hasEncryptionKey() != other.hasEncryptionKey()) return false; if (hasEncryptionKey()) { if (!getEncryptionKey() .equals(other.getEncryptionKey())) return false; } if (hasExpiryDate() != other.hasExpiryDate()) return false; if (hasExpiryDate()) { if (getExpiryDate() != other.getExpiryDate()) return false; } if (hasEncryptionAlgorithm() != other.hasEncryptionAlgorithm()) return false; if (hasEncryptionAlgorithm()) { if (!getEncryptionAlgorithm() .equals(other.getEncryptionAlgorithm())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasKeyId()) { hash = (37 * hash) + KEYID_FIELD_NUMBER; hash = (53 * hash) + getKeyId(); } if (hasBlockPoolId()) { hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; hash = (53 * hash) + getBlockPoolId().hashCode(); } if (hasNonce()) { hash = (37 * hash) + NONCE_FIELD_NUMBER; hash = (53 * hash) + getNonce().hashCode(); } if (hasEncryptionKey()) { hash = (37 * hash) + ENCRYPTIONKEY_FIELD_NUMBER; hash = (53 * hash) + getEncryptionKey().hashCode(); } if (hasExpiryDate()) { hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getExpiryDate()); } if (hasEncryptionAlgorithm()) { hash = (37 * hash) + ENCRYPTIONALGORITHM_FIELD_NUMBER; hash = (53 * hash) + getEncryptionAlgorithm().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( java.nio.ByteBuffer data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( java.io.InputStream input, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DataEncryptionKeyProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DataEncryptionKeyProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); keyId_ = 0; bitField0_ = (bitField0_ & ~0x00000001); blockPoolId_ = ""; bitField0_ = (bitField0_ & ~0x00000002); nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & 
~0x00000004); encryptionKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000008); expiryDate_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); encryptionAlgorithm_ = ""; bitField0_ = (bitField0_ & ~0x00000020); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.keyId_ = keyId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.blockPoolId_ = blockPoolId_; if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.nonce_ = nonce_; if (((from_bitField0_ & 0x00000008) != 0)) { to_bitField0_ |= 0x00000008; } result.encryptionKey_ = encryptionKey_; if (((from_bitField0_ & 0x00000010) != 0)) { result.expiryDate_ = expiryDate_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { to_bitField0_ |= 0x00000020; } 
result.encryptionAlgorithm_ = encryptionAlgorithm_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance()) return this; if (other.hasKeyId()) { setKeyId(other.getKeyId()); } if (other.hasBlockPoolId()) { bitField0_ |= 0x00000002; blockPoolId_ = other.blockPoolId_; onChanged(); } if (other.hasNonce()) { setNonce(other.getNonce()); } if (other.hasEncryptionKey()) { setEncryptionKey(other.getEncryptionKey()); } if (other.hasExpiryDate()) { 
setExpiryDate(other.getExpiryDate()); } if (other.hasEncryptionAlgorithm()) { bitField0_ |= 0x00000020; encryptionAlgorithm_ = other.encryptionAlgorithm_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasKeyId()) { return false; } if (!hasBlockPoolId()) { return false; } if (!hasNonce()) { return false; } if (!hasEncryptionKey()) { return false; } if (!hasExpiryDate()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int keyId_ ; /** * required uint32 keyId = 1; */ public boolean hasKeyId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint32 keyId = 1; */ public int getKeyId() { return keyId_; } /** * required uint32 keyId = 1; */ public Builder setKeyId(int value) { bitField0_ |= 0x00000001; keyId_ = value; onChanged(); return this; } /** * required uint32 keyId = 1; */ public Builder clearKeyId() { bitField0_ = (bitField0_ & ~0x00000001); keyId_ = 0; onChanged(); return this; } private java.lang.Object blockPoolId_ = ""; /** * required string blockPoolId = 2; */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000002) != 0); } /** * required string blockPoolId = 2; */ public java.lang.String getBlockPoolId() { java.lang.Object ref = 
blockPoolId_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string blockPoolId = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string blockPoolId = 2; */ public Builder setBlockPoolId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; blockPoolId_ = value; onChanged(); return this; } /** * required string blockPoolId = 2; */ public Builder clearBlockPoolId() { bitField0_ = (bitField0_ & ~0x00000002); blockPoolId_ = getDefaultInstance().getBlockPoolId(); onChanged(); return this; } /** * required string blockPoolId = 2; */ public Builder setBlockPoolIdBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; blockPoolId_ = value; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes nonce = 3; */ public boolean hasNonce() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes nonce = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getNonce() { return nonce_; } /** * required bytes nonce = 3; */ public Builder setNonce(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; nonce_ = 
value; onChanged(); return this; } /** * required bytes nonce = 3; */ public Builder clearNonce() { bitField0_ = (bitField0_ & ~0x00000004); nonce_ = getDefaultInstance().getNonce(); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString encryptionKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes encryptionKey = 4; */ public boolean hasEncryptionKey() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes encryptionKey = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionKey() { return encryptionKey_; } /** * required bytes encryptionKey = 4; */ public Builder setEncryptionKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; encryptionKey_ = value; onChanged(); return this; } /** * required bytes encryptionKey = 4; */ public Builder clearEncryptionKey() { bitField0_ = (bitField0_ & ~0x00000008); encryptionKey_ = getDefaultInstance().getEncryptionKey(); onChanged(); return this; } private long expiryDate_ ; /** * required uint64 expiryDate = 5; */ public boolean hasExpiryDate() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint64 expiryDate = 5; */ public long getExpiryDate() { return expiryDate_; } /** * required uint64 expiryDate = 5; */ public Builder setExpiryDate(long value) { bitField0_ |= 0x00000010; expiryDate_ = value; onChanged(); return this; } /** * required uint64 expiryDate = 5; */ public Builder clearExpiryDate() { bitField0_ = (bitField0_ & ~0x00000010); expiryDate_ = 0L; onChanged(); return this; } private java.lang.Object encryptionAlgorithm_ = ""; /** * optional string encryptionAlgorithm = 6; */ public boolean hasEncryptionAlgorithm() { return ((bitField0_ & 0x00000020) != 0); } /** * optional string encryptionAlgorithm = 6; */ public java.lang.String getEncryptionAlgorithm() { java.lang.Object ref = encryptionAlgorithm_; if (!(ref instanceof 
java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { encryptionAlgorithm_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string encryptionAlgorithm = 6; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionAlgorithmBytes() { java.lang.Object ref = encryptionAlgorithm_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); encryptionAlgorithm_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string encryptionAlgorithm = 6; */ public Builder setEncryptionAlgorithm( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; encryptionAlgorithm_ = value; onChanged(); return this; } /** * optional string encryptionAlgorithm = 6; */ public Builder clearEncryptionAlgorithm() { bitField0_ = (bitField0_ & ~0x00000020); encryptionAlgorithm_ = getDefaultInstance().getEncryptionAlgorithm(); onChanged(); return this; } /** * optional string encryptionAlgorithm = 6; */ public Builder setEncryptionAlgorithmBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; encryptionAlgorithm_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DataEncryptionKeyProto) } // 
// NOTE(review): machine-generated protobuf code ("DO NOT EDIT" per file header), flattened onto
// single physical lines by the page extraction (some line breaks fall inside generated comments).
// Code tokens below are kept byte-identical; regenerate from hdfs.proto with protoc instead of
// hand-editing. This span closes DataEncryptionKeyProto (singleton DEFAULT_INSTANCE + PARSER)
// and declares the read-only accessor contract for FileEncryptionInfoProto.
@@protoc_insertion_point(class_scope:hadoop.hdfs.DataEncryptionKeyProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DataEncryptionKeyProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new DataEncryptionKeyProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface FileEncryptionInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.FileEncryptionInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ boolean hasSuite(); /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite(); /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ boolean hasCryptoProtocolVersion(); /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto 
// Remainder of the accessor contract for proto2 'required' fields 1-6 (suite,
// cryptoProtocolVersion, key, iv, keyName, ezKeyVersionName): hasX() reports field presence,
// getX() returns the value, and string fields also expose a raw getXBytes() ByteString form.
getCryptoProtocolVersion(); /** * required bytes key = 3; */ boolean hasKey(); /** * required bytes key = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getKey(); /** * required bytes iv = 4; */ boolean hasIv(); /** * required bytes iv = 4; */ org.apache.hadoop.thirdparty.protobuf.ByteString getIv(); /** * required string keyName = 5; */ boolean hasKeyName(); /** * required string keyName = 5; */ java.lang.String getKeyName(); /** * required string keyName = 5; */ org.apache.hadoop.thirdparty.protobuf.ByteString getKeyNameBytes(); /** * required string ezKeyVersionName = 6; */ boolean hasEzKeyVersionName(); /** * required string ezKeyVersionName = 6; */ java.lang.String getEzKeyVersionName(); /** * required string ezKeyVersionName = 6; */ org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes(); } /** *
   **
   * Encryption information for a file.
   * 
 * NOTE(review): generated message class for hadoop.hdfs.FileEncryptionInfoProto — do not
 * hand-edit; regenerate from hdfs.proto with protoc. Code tokens are kept byte-identical;
 * only reviewer comments are inserted between the scrape-flattened physical lines.
* * Protobuf type {@code hadoop.hdfs.FileEncryptionInfoProto} */ public static final class FileEncryptionInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.FileEncryptionInfoProto) FileEncryptionInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use FileEncryptionInfoProto.newBuilder() to construct. private FileEncryptionInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private FileEncryptionInfoProto() { suite_ = 1; cryptoProtocolVersion_ = 1; key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; keyName_ = ""; ezKeyVersionName_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FileEncryptionInfoProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; suite_ = rawValue; } break; } case 16: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") 
// Wire-format parse loop (cont.): tag 16 = cryptoProtocolVersion enum; tags 26/34 = key/iv
// bytes; tags 42/50 = keyName/ezKeyVersionName strings; unrecognized enum numbers and unknown
// tags are preserved in unknownFields rather than dropped.
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(2, rawValue); } else { bitField0_ |= 0x00000002; cryptoProtocolVersion_ = rawValue; } break; } case 26: { bitField0_ |= 0x00000004; key_ = input.readBytes(); break; } case 34: { bitField0_ |= 0x00000008; iv_ = input.readBytes(); break; } case 42: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000010; keyName_ = bs; break; } case 50: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000020; ezKeyVersionName_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder.class); } private int bitField0_; public static final int SUITE_FIELD_NUMBER = 1; private int suite_; /** * 
required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public boolean hasSuite() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(suite_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result; } public static final int CRYPTOPROTOCOLVERSION_FIELD_NUMBER = 2; private int cryptoProtocolVersion_; /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public boolean hasCryptoProtocolVersion() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(cryptoProtocolVersion_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION : result; } public static final int KEY_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString key_; /** * required bytes key = 3; */ public boolean hasKey() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes key = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() { return key_; } public static final int IV_FIELD_NUMBER = 4; private org.apache.hadoop.thirdparty.protobuf.ByteString iv_; /** * required bytes iv = 4; */ public boolean hasIv() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes iv = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getIv() { return iv_; } public static final int KEYNAME_FIELD_NUMBER = 5; private volatile java.lang.Object keyName_; /** * required string keyName = 5; */ public boolean hasKeyName() { return ((bitField0_ & 0x00000010) != 0); } /** * required string keyName = 5; */ public java.lang.String getKeyName() { java.lang.Object ref = keyName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyName_ = s; } return s; } } /** * required string keyName = 5; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyNameBytes() { java.lang.Object ref = keyName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 6; private volatile java.lang.Object ezKeyVersionName_; /** * required string ezKeyVersionName = 6; */ public boolean hasEzKeyVersionName() { return 
// presence bit 0x00000020 tracks the required ezKeyVersionName field
((bitField0_ & 0x00000020) != 0); } /** * required string ezKeyVersionName = 6; */ public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } } /** * required string ezKeyVersionName = 6; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSuite()) { memoizedIsInitialized = 0; return false; } if (!hasCryptoProtocolVersion()) { memoizedIsInitialized = 0; return false; } if (!hasKey()) { memoizedIsInitialized = 0; return false; } if (!hasIv()) { memoizedIsInitialized = 0; return false; } if (!hasKeyName()) { memoizedIsInitialized = 0; return false; } if (!hasEzKeyVersionName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, suite_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeEnum(2, cryptoProtocolVersion_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBytes(3, key_); } if (((bitField0_ & 0x00000008) != 0)) { 
output.writeBytes(4, iv_); } if (((bitField0_ & 0x00000010) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, keyName_); } if (((bitField0_ & 0x00000020) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, ezKeyVersionName_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, suite_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(2, cryptoProtocolVersion_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, key_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(4, iv_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, keyName_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, ezKeyVersionName_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) obj; if (hasSuite() != other.hasSuite()) return false; if (hasSuite()) { if (suite_ != other.suite_) return false; } if (hasCryptoProtocolVersion() != other.hasCryptoProtocolVersion()) return false; if (hasCryptoProtocolVersion()) { if 
(cryptoProtocolVersion_ != other.cryptoProtocolVersion_) return false; } if (hasKey() != other.hasKey()) return false; if (hasKey()) { if (!getKey() .equals(other.getKey())) return false; } if (hasIv() != other.hasIv()) return false; if (hasIv()) { if (!getIv() .equals(other.getIv())) return false; } if (hasKeyName() != other.hasKeyName()) return false; if (hasKeyName()) { if (!getKeyName() .equals(other.getKeyName())) return false; } if (hasEzKeyVersionName() != other.hasEzKeyVersionName()) return false; if (hasEzKeyVersionName()) { if (!getEzKeyVersionName() .equals(other.getEzKeyVersionName())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSuite()) { hash = (37 * hash) + SUITE_FIELD_NUMBER; hash = (53 * hash) + suite_; } if (hasCryptoProtocolVersion()) { hash = (37 * hash) + CRYPTOPROTOCOLVERSION_FIELD_NUMBER; hash = (53 * hash) + cryptoProtocolVersion_; } if (hasKey()) { hash = (37 * hash) + KEY_FIELD_NUMBER; hash = (53 * hash) + getKey().hashCode(); } if (hasIv()) { hash = (37 * hash) + IV_FIELD_NUMBER; hash = (53 * hash) + getIv().hashCode(); } if (hasKeyName()) { hash = (37 * hash) + KEYNAME_FIELD_NUMBER; hash = (53 * hash) + getKeyName().hashCode(); } if (hasEzKeyVersionName()) { hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER; hash = (53 * hash) + getEzKeyVersionName().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( java.nio.ByteBuffer data, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public 
// Standard generated parseFrom/parseDelimitedFrom overloads, all delegating to PARSER, plus
// newBuilder/toBuilder factory plumbing.
static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Encryption information for a file.
     * 
* * Protobuf type {@code hadoop.hdfs.FileEncryptionInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.FileEncryptionInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); suite_ = 1; bitField0_ = (bitField0_ & ~0x00000001); cryptoProtocolVersion_ = 1; bitField0_ = (bitField0_ & ~0x00000002); key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000008); keyName_ = ""; bitField0_ = (bitField0_ & ~0x00000010); ezKeyVersionName_ = ""; bitField0_ = (bitField0_ & 
~0x00000020); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.suite_ = suite_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.cryptoProtocolVersion_ = cryptoProtocolVersion_; if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.key_ = key_; if (((from_bitField0_ & 0x00000008) != 0)) { to_bitField0_ |= 0x00000008; } result.iv_ = iv_; if (((from_bitField0_ & 0x00000010) != 0)) { to_bitField0_ |= 0x00000010; } result.keyName_ = keyName_; if (((from_bitField0_ & 0x00000020) != 0)) { to_bitField0_ |= 0x00000020; } result.ezKeyVersionName_ = ezKeyVersionName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) return this; if (other.hasSuite()) { setSuite(other.getSuite()); } if (other.hasCryptoProtocolVersion()) { setCryptoProtocolVersion(other.getCryptoProtocolVersion()); } if (other.hasKey()) { setKey(other.getKey()); } if (other.hasIv()) { setIv(other.getIv()); } if (other.hasKeyName()) { bitField0_ |= 0x00000010; keyName_ = other.keyName_; onChanged(); } if (other.hasEzKeyVersionName()) { bitField0_ |= 0x00000020; ezKeyVersionName_ = other.ezKeyVersionName_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override 
// Builder.isInitialized(): every field is proto2 'required', so all six must be set before build().
public final boolean isInitialized() { if (!hasSuite()) { return false; } if (!hasCryptoProtocolVersion()) { return false; } if (!hasKey()) { return false; } if (!hasIv()) { return false; } if (!hasKeyName()) { return false; } if (!hasEzKeyVersionName()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int suite_ = 1; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public boolean hasSuite() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(suite_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; suite_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public Builder clearSuite() { bitField0_ = (bitField0_ & ~0x00000001); suite_ = 1; onChanged(); return this; } private int cryptoProtocolVersion_ = 1; /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public boolean hasCryptoProtocolVersion() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(cryptoProtocolVersion_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION : result; } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public Builder setCryptoProtocolVersion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; cryptoProtocolVersion_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public Builder clearCryptoProtocolVersion() { bitField0_ = (bitField0_ & ~0x00000002); cryptoProtocolVersion_ = 1; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes key = 3; */ public boolean hasKey() { return ((bitField0_ & 0x00000004) != 0); } /** * required bytes key = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() { return key_; } /** * required bytes key = 3; */ public Builder setKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; key_ = value; onChanged(); return this; } /** * required bytes key = 3; */ public Builder clearKey() { bitField0_ = (bitField0_ & ~0x00000004); key_ = getDefaultInstance().getKey(); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes iv = 4; */ public boolean hasIv() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes iv = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getIv() { return iv_; } /** * required bytes iv = 4; */ public Builder setIv(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; iv_ = value; onChanged(); 
return this; } /** * required bytes iv = 4; */ public Builder clearIv() { bitField0_ = (bitField0_ & ~0x00000008); iv_ = getDefaultInstance().getIv(); onChanged(); return this; } private java.lang.Object keyName_ = ""; /** * required string keyName = 5; */ public boolean hasKeyName() { return ((bitField0_ & 0x00000010) != 0); } /** * required string keyName = 5; */ public java.lang.String getKeyName() { java.lang.Object ref = keyName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string keyName = 5; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyNameBytes() { java.lang.Object ref = keyName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string keyName = 5; */ public Builder setKeyName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; keyName_ = value; onChanged(); return this; } /** * required string keyName = 5; */ public Builder clearKeyName() { bitField0_ = (bitField0_ & ~0x00000010); keyName_ = getDefaultInstance().getKeyName(); onChanged(); return this; } /** * required string keyName = 5; */ public Builder setKeyNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; keyName_ = value; onChanged(); return this; } private java.lang.Object ezKeyVersionName_ = ""; /** * required string ezKeyVersionName = 6; */ public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000020) != 0); } /** * required 
string ezKeyVersionName = 6; */ public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string ezKeyVersionName = 6; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string ezKeyVersionName = 6; */ public Builder setEzKeyVersionName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; ezKeyVersionName_ = value; onChanged(); return this; } /** * required string ezKeyVersionName = 6; */ public Builder clearEzKeyVersionName() { bitField0_ = (bitField0_ & ~0x00000020); ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName(); onChanged(); return this; } /** * required string ezKeyVersionName = 6; */ public Builder setEzKeyVersionNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; ezKeyVersionName_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // 
@@protoc_insertion_point(builder_scope:hadoop.hdfs.FileEncryptionInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.FileEncryptionInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public FileEncryptionInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new FileEncryptionInfoProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface PerFileEncryptionInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.PerFileEncryptionInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required bytes key = 1; */ boolean hasKey(); /** * required bytes key = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getKey(); /** * required bytes iv = 2; */ boolean hasIv(); /** * required bytes iv = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getIv(); /** * required string ezKeyVersionName = 3; */ boolean hasEzKeyVersionName(); /** * required string ezKeyVersionName = 3; */ 
java.lang.String getEzKeyVersionName(); /** * required string ezKeyVersionName = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes(); } /** *
   **
   * Encryption information for an individual
   * file within an encryption zone
   * 
* * Protobuf type {@code hadoop.hdfs.PerFileEncryptionInfoProto} */ public static final class PerFileEncryptionInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.PerFileEncryptionInfoProto) PerFileEncryptionInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use PerFileEncryptionInfoProto.newBuilder() to construct. private PerFileEncryptionInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private PerFileEncryptionInfoProto() { key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; ezKeyVersionName_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private PerFileEncryptionInfoProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { bitField0_ |= 0x00000001; key_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; iv_ = input.readBytes(); break; } case 26: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000004; ezKeyVersionName_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { 
throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.Builder.class); } private int bitField0_; public static final int KEY_FIELD_NUMBER = 1; private org.apache.hadoop.thirdparty.protobuf.ByteString key_; /** * required bytes key = 1; */ public boolean hasKey() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes key = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() { return key_; } public static final int IV_FIELD_NUMBER = 2; private org.apache.hadoop.thirdparty.protobuf.ByteString iv_; /** * required bytes iv = 2; */ public boolean hasIv() { return ((bitField0_ & 0x00000002) != 0); } /** * required bytes iv = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getIv() { return iv_; } public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 3; private volatile java.lang.Object ezKeyVersionName_; /** * required string ezKeyVersionName = 3; */ public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000004) != 0); } /** * required string ezKeyVersionName = 3; */ public java.lang.String getEzKeyVersionName() { java.lang.Object ref 
= ezKeyVersionName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } } /** * required string ezKeyVersionName = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasKey()) { memoizedIsInitialized = 0; return false; } if (!hasIv()) { memoizedIsInitialized = 0; return false; } if (!hasEzKeyVersionName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeBytes(1, key_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBytes(2, iv_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, ezKeyVersionName_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(1, key_); } if (((bitField0_ & 0x00000002) != 0)) { size += 
org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(2, iv_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, ezKeyVersionName_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) obj; if (hasKey() != other.hasKey()) return false; if (hasKey()) { if (!getKey() .equals(other.getKey())) return false; } if (hasIv() != other.hasIv()) return false; if (hasIv()) { if (!getIv() .equals(other.getIv())) return false; } if (hasEzKeyVersionName() != other.hasEzKeyVersionName()) return false; if (hasEzKeyVersionName()) { if (!getEzKeyVersionName() .equals(other.getEzKeyVersionName())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasKey()) { hash = (37 * hash) + KEY_FIELD_NUMBER; hash = (53 * hash) + getKey().hashCode(); } if (hasIv()) { hash = (37 * hash) + IV_FIELD_NUMBER; hash = (53 * hash) + getIv().hashCode(); } if (hasEzKeyVersionName()) { hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER; hash = (53 * hash) + getEzKeyVersionName().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite 
extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Encryption information for an individual
     * file within an encryption zone
     * 
* * Protobuf type {@code hadoop.hdfs.PerFileEncryptionInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.PerFileEncryptionInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); ezKeyVersionName_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.key_ = key_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.iv_ = iv_; if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.ezKeyVersionName_ = ezKeyVersionName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder 
setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.getDefaultInstance()) return this; if (other.hasKey()) { setKey(other.getKey()); } if (other.hasIv()) { setIv(other.getIv()); } if (other.hasEzKeyVersionName()) { bitField0_ |= 0x00000004; ezKeyVersionName_ = other.ezKeyVersionName_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasKey()) { return false; } if (!hasIv()) { return false; } if (!hasEzKeyVersionName()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = 
(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.ByteString key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes key = 1; */ public boolean hasKey() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes key = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() { return key_; } /** * required bytes key = 1; */ public Builder setKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; key_ = value; onChanged(); return this; } /** * required bytes key = 1; */ public Builder clearKey() { bitField0_ = (bitField0_ & ~0x00000001); key_ = getDefaultInstance().getKey(); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes iv = 2; */ public boolean hasIv() { return ((bitField0_ & 0x00000002) != 0); } /** * required bytes iv = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getIv() { return iv_; } /** * required bytes iv = 2; */ public Builder setIv(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; iv_ = value; onChanged(); return this; } /** * required bytes iv = 2; */ public Builder clearIv() { bitField0_ = (bitField0_ & ~0x00000002); iv_ = getDefaultInstance().getIv(); onChanged(); return this; } private java.lang.Object ezKeyVersionName_ = ""; /** * required string ezKeyVersionName = 3; */ public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000004) != 0); } /** * required string ezKeyVersionName = 3; */ public java.lang.String getEzKeyVersionName() { 
java.lang.Object ref = ezKeyVersionName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string ezKeyVersionName = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string ezKeyVersionName = 3; */ public Builder setEzKeyVersionName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; ezKeyVersionName_ = value; onChanged(); return this; } /** * required string ezKeyVersionName = 3; */ public Builder clearEzKeyVersionName() { bitField0_ = (bitField0_ & ~0x00000004); ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName(); onChanged(); return this; } /** * required string ezKeyVersionName = 3; */ public Builder setEzKeyVersionNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; ezKeyVersionName_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PerFileEncryptionInfoProto) } 
// @@protoc_insertion_point(class_scope:hadoop.hdfs.PerFileEncryptionInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public PerFileEncryptionInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new PerFileEncryptionInfoProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ZoneEncryptionInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ZoneEncryptionInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ boolean hasSuite(); /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite(); /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ boolean hasCryptoProtocolVersion(); /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion(); /** * required string keyName = 3; */ boolean hasKeyName(); /** * required string keyName = 3; */ java.lang.String getKeyName(); /** * required string keyName = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getKeyNameBytes(); /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ boolean hasReencryptionProto(); /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getReencryptionProto(); /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder getReencryptionProtoOrBuilder(); } /** *
   * <pre>
   **
   * Encryption information for an encryption
   * zone
   * </pre>
* * Protobuf type {@code hadoop.hdfs.ZoneEncryptionInfoProto} */ public static final class ZoneEncryptionInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ZoneEncryptionInfoProto) ZoneEncryptionInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ZoneEncryptionInfoProto.newBuilder() to construct. private ZoneEncryptionInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ZoneEncryptionInfoProto() { suite_ = 1; cryptoProtocolVersion_ = 1; keyName_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ZoneEncryptionInfoProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; suite_ = rawValue; } break; } case 16: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(2, rawValue); } else { bitField0_ |= 0x00000002; cryptoProtocolVersion_ = rawValue; } break; } case 26: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000004; keyName_ = bs; break; } case 34: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000008) != 0)) { subBuilder = reencryptionProto_.toBuilder(); } reencryptionProto_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(reencryptionProto_); reencryptionProto_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.class, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.Builder.class); } private int bitField0_; public static final int SUITE_FIELD_NUMBER = 1; private int suite_; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public boolean hasSuite() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(suite_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result; } public static final int CRYPTOPROTOCOLVERSION_FIELD_NUMBER = 2; private int cryptoProtocolVersion_; /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public boolean hasCryptoProtocolVersion() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(cryptoProtocolVersion_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION : result; } public static final int KEYNAME_FIELD_NUMBER = 3; private volatile java.lang.Object keyName_; /** * required string keyName = 3; */ public boolean hasKeyName() { return ((bitField0_ & 0x00000004) != 0); } /** * required string keyName = 3; */ public java.lang.String getKeyName() { java.lang.Object ref = keyName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyName_ = s; } return s; } } /** * required string keyName = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyNameBytes() { java.lang.Object ref = keyName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int REENCRYPTIONPROTO_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto reencryptionProto_; /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public boolean hasReencryptionProto() { return ((bitField0_ & 0x00000008) != 0); } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getReencryptionProto() { return reencryptionProto_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance() : reencryptionProto_; } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder getReencryptionProtoOrBuilder() { return reencryptionProto_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance() : reencryptionProto_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSuite()) { memoizedIsInitialized = 0; return false; } if (!hasCryptoProtocolVersion()) { memoizedIsInitialized = 0; return false; } if (!hasKeyName()) { memoizedIsInitialized = 0; return false; } if (hasReencryptionProto()) { if (!getReencryptionProto().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, suite_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeEnum(2, cryptoProtocolVersion_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, keyName_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeMessage(4, getReencryptionProto()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, suite_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(2, cryptoProtocolVersion_); 
} if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, keyName_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, getReencryptionProto()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) obj; if (hasSuite() != other.hasSuite()) return false; if (hasSuite()) { if (suite_ != other.suite_) return false; } if (hasCryptoProtocolVersion() != other.hasCryptoProtocolVersion()) return false; if (hasCryptoProtocolVersion()) { if (cryptoProtocolVersion_ != other.cryptoProtocolVersion_) return false; } if (hasKeyName() != other.hasKeyName()) return false; if (hasKeyName()) { if (!getKeyName() .equals(other.getKeyName())) return false; } if (hasReencryptionProto() != other.hasReencryptionProto()) return false; if (hasReencryptionProto()) { if (!getReencryptionProto() .equals(other.getReencryptionProto())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSuite()) { hash = (37 * hash) + SUITE_FIELD_NUMBER; hash = (53 * hash) + suite_; } if (hasCryptoProtocolVersion()) { hash = (37 * hash) + CRYPTOPROTOCOLVERSION_FIELD_NUMBER; hash = (53 * hash) + cryptoProtocolVersion_; } if (hasKeyName()) { hash = (37 * hash) + KEYNAME_FIELD_NUMBER; hash = (53 * hash) + getKeyName().hashCode(); } if 
(hasReencryptionProto()) { hash = (37 * hash) + REENCRYPTIONPROTO_FIELD_NUMBER; hash = (53 * hash) + getReencryptionProto().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return 
newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     * <pre>
     **
     * Encryption information for an encryption
     * zone
     * </pre>
* * Protobuf type {@code hadoop.hdfs.ZoneEncryptionInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ZoneEncryptionInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getReencryptionProtoFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); suite_ = 1; bitField0_ = (bitField0_ & ~0x00000001); cryptoProtocolVersion_ = 1; bitField0_ = (bitField0_ & ~0x00000002); keyName_ = ""; bitField0_ = (bitField0_ & ~0x00000004); if (reencryptionProtoBuilder_ == null) { reencryptionProto_ = null; } else { reencryptionProtoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } @java.lang.Override public 
org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.suite_ = suite_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.cryptoProtocolVersion_ = cryptoProtocolVersion_; if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.keyName_ = keyName_; if (((from_bitField0_ & 0x00000008) != 0)) { if (reencryptionProtoBuilder_ == null) { result.reencryptionProto_ = reencryptionProto_; } else { result.reencryptionProto_ = reencryptionProtoBuilder_.build(); } to_bitField0_ |= 0x00000008; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override 
public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.getDefaultInstance()) return this; if (other.hasSuite()) { setSuite(other.getSuite()); } if (other.hasCryptoProtocolVersion()) { setCryptoProtocolVersion(other.getCryptoProtocolVersion()); } if (other.hasKeyName()) { bitField0_ |= 0x00000004; keyName_ = other.keyName_; onChanged(); } if (other.hasReencryptionProto()) { mergeReencryptionProto(other.getReencryptionProto()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasSuite()) { return false; } if (!hasCryptoProtocolVersion()) { return false; } if (!hasKeyName()) { return false; } if (hasReencryptionProto()) { if (!getReencryptionProto().isInitialized()) { return false; } } return true; } 
@java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int suite_ = 1; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public boolean hasSuite() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(suite_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; suite_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public Builder clearSuite() { bitField0_ = (bitField0_ & ~0x00000001); suite_ = 1; onChanged(); return this; } private int cryptoProtocolVersion_ = 1; /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public boolean hasCryptoProtocolVersion() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(cryptoProtocolVersion_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION : result; } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public Builder setCryptoProtocolVersion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; cryptoProtocolVersion_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public Builder clearCryptoProtocolVersion() { bitField0_ = (bitField0_ & ~0x00000002); cryptoProtocolVersion_ = 1; onChanged(); return this; } private java.lang.Object keyName_ = ""; /** * required string keyName = 3; */ public boolean hasKeyName() { return ((bitField0_ & 0x00000004) != 0); } /** * required string keyName = 3; */ public java.lang.String getKeyName() { java.lang.Object ref = keyName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string keyName = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyNameBytes() { java.lang.Object ref = keyName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string keyName = 3; */ public Builder setKeyName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; keyName_ = value; onChanged(); return this; } /** * required string keyName = 3; */ public Builder clearKeyName() { bitField0_ = (bitField0_ & 
~0x00000004); keyName_ = getDefaultInstance().getKeyName(); onChanged(); return this; } /** * required string keyName = 3; */ public Builder setKeyNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; keyName_ = value; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto reencryptionProto_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder> reencryptionProtoBuilder_; /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public boolean hasReencryptionProto() { return ((bitField0_ & 0x00000008) != 0); } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getReencryptionProto() { if (reencryptionProtoBuilder_ == null) { return reencryptionProto_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance() : reencryptionProto_; } else { return reencryptionProtoBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public Builder setReencryptionProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto value) { if (reencryptionProtoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } reencryptionProto_ = value; onChanged(); } else { reencryptionProtoBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public Builder setReencryptionProto( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder builderForValue) { if (reencryptionProtoBuilder_ == null) { reencryptionProto_ = builderForValue.build(); onChanged(); } else { reencryptionProtoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public Builder mergeReencryptionProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto value) { if (reencryptionProtoBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0) && reencryptionProto_ != null && reencryptionProto_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance()) { reencryptionProto_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.newBuilder(reencryptionProto_).mergeFrom(value).buildPartial(); } else { reencryptionProto_ = value; } onChanged(); } else { reencryptionProtoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public Builder clearReencryptionProto() { if (reencryptionProtoBuilder_ == null) { reencryptionProto_ = null; onChanged(); } else { reencryptionProtoBuilder_.clear(); } 
bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder getReencryptionProtoBuilder() { bitField0_ |= 0x00000008; onChanged(); return getReencryptionProtoFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder getReencryptionProtoOrBuilder() { if (reencryptionProtoBuilder_ != null) { return reencryptionProtoBuilder_.getMessageOrBuilder(); } else { return reencryptionProto_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance() : reencryptionProto_; } } /** * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder> getReencryptionProtoFieldBuilder() { if (reencryptionProtoBuilder_ == null) { reencryptionProtoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder>( getReencryptionProto(), getParentForChildren(), isClean()); reencryptionProto_ = null; } return reencryptionProtoBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final 
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ZoneEncryptionInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ZoneEncryptionInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ZoneEncryptionInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new ZoneEncryptionInfoProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ReencryptionInfoProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ReencryptionInfoProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string ezKeyVersionName = 1; */ boolean hasEzKeyVersionName(); /** * required string ezKeyVersionName = 1; */ java.lang.String getEzKeyVersionName(); /** * required string ezKeyVersionName = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes(); /** * 
required uint64 submissionTime = 2; */ boolean hasSubmissionTime(); /** * required uint64 submissionTime = 2; */ long getSubmissionTime(); /** * required bool canceled = 3; */ boolean hasCanceled(); /** * required bool canceled = 3; */ boolean getCanceled(); /** * required int64 numReencrypted = 4; */ boolean hasNumReencrypted(); /** * required int64 numReencrypted = 4; */ long getNumReencrypted(); /** * required int64 numFailures = 5; */ boolean hasNumFailures(); /** * required int64 numFailures = 5; */ long getNumFailures(); /** * optional uint64 completionTime = 6; */ boolean hasCompletionTime(); /** * optional uint64 completionTime = 6; */ long getCompletionTime(); /** * optional string lastFile = 7; */ boolean hasLastFile(); /** * optional string lastFile = 7; */ java.lang.String getLastFile(); /** * optional string lastFile = 7; */ org.apache.hadoop.thirdparty.protobuf.ByteString getLastFileBytes(); } /** *
   **
   * Re-encryption information for an encryption zone
   * 
* * Protobuf type {@code hadoop.hdfs.ReencryptionInfoProto} */ public static final class ReencryptionInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ReencryptionInfoProto) ReencryptionInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ReencryptionInfoProto.newBuilder() to construct. private ReencryptionInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ReencryptionInfoProto() { ezKeyVersionName_ = ""; lastFile_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ReencryptionInfoProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; ezKeyVersionName_ = bs; break; } case 16: { bitField0_ |= 0x00000002; submissionTime_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; canceled_ = input.readBool(); break; } case 32: { bitField0_ |= 0x00000008; numReencrypted_ = input.readInt64(); break; } case 40: { bitField0_ |= 0x00000010; numFailures_ = input.readInt64(); break; } case 48: { bitField0_ |= 0x00000020; completionTime_ = input.readUInt64(); break; } case 58: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
input.readBytes(); bitField0_ |= 0x00000040; lastFile_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder.class); } private int bitField0_; public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 1; private volatile java.lang.Object ezKeyVersionName_; /** * required string ezKeyVersionName = 1; */ public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string ezKeyVersionName = 1; */ public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } } /** * required string ezKeyVersionName = 1; */ public 
org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int SUBMISSIONTIME_FIELD_NUMBER = 2; private long submissionTime_; /** * required uint64 submissionTime = 2; */ public boolean hasSubmissionTime() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 submissionTime = 2; */ public long getSubmissionTime() { return submissionTime_; } public static final int CANCELED_FIELD_NUMBER = 3; private boolean canceled_; /** * required bool canceled = 3; */ public boolean hasCanceled() { return ((bitField0_ & 0x00000004) != 0); } /** * required bool canceled = 3; */ public boolean getCanceled() { return canceled_; } public static final int NUMREENCRYPTED_FIELD_NUMBER = 4; private long numReencrypted_; /** * required int64 numReencrypted = 4; */ public boolean hasNumReencrypted() { return ((bitField0_ & 0x00000008) != 0); } /** * required int64 numReencrypted = 4; */ public long getNumReencrypted() { return numReencrypted_; } public static final int NUMFAILURES_FIELD_NUMBER = 5; private long numFailures_; /** * required int64 numFailures = 5; */ public boolean hasNumFailures() { return ((bitField0_ & 0x00000010) != 0); } /** * required int64 numFailures = 5; */ public long getNumFailures() { return numFailures_; } public static final int COMPLETIONTIME_FIELD_NUMBER = 6; private long completionTime_; /** * optional uint64 completionTime = 6; */ public boolean hasCompletionTime() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 completionTime = 6; */ public long getCompletionTime() { return completionTime_; } public static final int LASTFILE_FIELD_NUMBER = 7; private volatile 
java.lang.Object lastFile_; /** * optional string lastFile = 7; */ public boolean hasLastFile() { return ((bitField0_ & 0x00000040) != 0); } /** * optional string lastFile = 7; */ public java.lang.String getLastFile() { java.lang.Object ref = lastFile_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { lastFile_ = s; } return s; } } /** * optional string lastFile = 7; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getLastFileBytes() { java.lang.Object ref = lastFile_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); lastFile_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasEzKeyVersionName()) { memoizedIsInitialized = 0; return false; } if (!hasSubmissionTime()) { memoizedIsInitialized = 0; return false; } if (!hasCanceled()) { memoizedIsInitialized = 0; return false; } if (!hasNumReencrypted()) { memoizedIsInitialized = 0; return false; } if (!hasNumFailures()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, ezKeyVersionName_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, submissionTime_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBool(3, 
canceled_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeInt64(4, numReencrypted_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeInt64(5, numFailures_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt64(6, completionTime_); } if (((bitField0_ & 0x00000040) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 7, lastFile_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, ezKeyVersionName_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, submissionTime_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(3, canceled_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt64Size(4, numReencrypted_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt64Size(5, numFailures_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(6, completionTime_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(7, lastFile_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto other = 
(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto) obj; if (hasEzKeyVersionName() != other.hasEzKeyVersionName()) return false; if (hasEzKeyVersionName()) { if (!getEzKeyVersionName() .equals(other.getEzKeyVersionName())) return false; } if (hasSubmissionTime() != other.hasSubmissionTime()) return false; if (hasSubmissionTime()) { if (getSubmissionTime() != other.getSubmissionTime()) return false; } if (hasCanceled() != other.hasCanceled()) return false; if (hasCanceled()) { if (getCanceled() != other.getCanceled()) return false; } if (hasNumReencrypted() != other.hasNumReencrypted()) return false; if (hasNumReencrypted()) { if (getNumReencrypted() != other.getNumReencrypted()) return false; } if (hasNumFailures() != other.hasNumFailures()) return false; if (hasNumFailures()) { if (getNumFailures() != other.getNumFailures()) return false; } if (hasCompletionTime() != other.hasCompletionTime()) return false; if (hasCompletionTime()) { if (getCompletionTime() != other.getCompletionTime()) return false; } if (hasLastFile() != other.hasLastFile()) return false; if (hasLastFile()) { if (!getLastFile() .equals(other.getLastFile())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasEzKeyVersionName()) { hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER; hash = (53 * hash) + getEzKeyVersionName().hashCode(); } if (hasSubmissionTime()) { hash = (37 * hash) + SUBMISSIONTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSubmissionTime()); } if (hasCanceled()) { hash = (37 * hash) + CANCELED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getCanceled()); } if (hasNumReencrypted()) { hash = (37 * hash) + NUMREENCRYPTED_FIELD_NUMBER; hash = (53 * hash) 
+ org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNumReencrypted()); } if (hasNumFailures()) { hash = (37 * hash) + NUMFAILURES_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNumFailures()); } if (hasCompletionTime()) { hash = (37 * hash) + COMPLETIONTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getCompletionTime()); } if (hasLastFile()) { hash = (37 * hash) + LASTFILE_FIELD_NUMBER; hash = (53 * hash) + getLastFile().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(byte[] data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Re-encryption information for an encryption zone
     * 
     * NOTE(review): generated builder for {@code hadoop.hdfs.ReencryptionInfoProto}.
     * Field presence is tracked in {@code bitField0_}, one bit per field in
     * declaration order (ezKeyVersionName=0x01 ... lastFile=0x40); {@code clear()}
     * resets every field to its default and clears its presence bit.
 * * Protobuf type {@code hadoop.hdfs.ReencryptionInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ReencryptionInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); ezKeyVersionName_ = ""; bitField0_ = (bitField0_ & ~0x00000001); submissionTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); canceled_ = false; bitField0_ = (bitField0_ & ~0x00000004); numReencrypted_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); numFailures_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); completionTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); lastFile_ = ""; bitField0_ = (bitField0_ & ~0x00000040); return this; } 
// buildPartial() below snapshots the builder's fields into an immutable message,
// copying each set presence bit from the builder's bitField0_ into the message's
// bitField0_. isInitialized() checks only the five `required` fields (1-5);
// completionTime and lastFile are `optional` and not checked.
@java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.ezKeyVersionName_ = ezKeyVersionName_; if (((from_bitField0_ & 0x00000002) != 0)) { result.submissionTime_ = submissionTime_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.canceled_ = canceled_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.numReencrypted_ = numReencrypted_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.numFailures_ = numFailures_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.completionTime_ = completionTime_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { to_bitField0_ |= 0x00000040; } result.lastFile_ = lastFile_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder 
clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance()) return this; if (other.hasEzKeyVersionName()) { bitField0_ |= 0x00000001; ezKeyVersionName_ = other.ezKeyVersionName_; onChanged(); } if (other.hasSubmissionTime()) { setSubmissionTime(other.getSubmissionTime()); } if (other.hasCanceled()) { setCanceled(other.getCanceled()); } if (other.hasNumReencrypted()) { setNumReencrypted(other.getNumReencrypted()); } if (other.hasNumFailures()) { setNumFailures(other.getNumFailures()); } if (other.hasCompletionTime()) { 
setCompletionTime(other.getCompletionTime()); } if (other.hasLastFile()) { bitField0_ |= 0x00000040; lastFile_ = other.lastFile_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasEzKeyVersionName()) { return false; } if (!hasSubmissionTime()) { return false; } if (!hasCanceled()) { return false; } if (!hasNumReencrypted()) { return false; } if (!hasNumFailures()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object ezKeyVersionName_ = ""; /** * required string ezKeyVersionName = 1; */ public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string ezKeyVersionName = 1; */ public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string ezKeyVersionName = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = 
ezKeyVersionName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string ezKeyVersionName = 1; */ public Builder setEzKeyVersionName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; ezKeyVersionName_ = value; onChanged(); return this; } /** * required string ezKeyVersionName = 1; */ public Builder clearEzKeyVersionName() { bitField0_ = (bitField0_ & ~0x00000001); ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName(); onChanged(); return this; } /** * required string ezKeyVersionName = 1; */ public Builder setEzKeyVersionNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; ezKeyVersionName_ = value; onChanged(); return this; } private long submissionTime_ ; /** * required uint64 submissionTime = 2; */ public boolean hasSubmissionTime() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 submissionTime = 2; */ public long getSubmissionTime() { return submissionTime_; } /** * required uint64 submissionTime = 2; */ public Builder setSubmissionTime(long value) { bitField0_ |= 0x00000002; submissionTime_ = value; onChanged(); return this; } /** * required uint64 submissionTime = 2; */ public Builder clearSubmissionTime() { bitField0_ = (bitField0_ & ~0x00000002); submissionTime_ = 0L; onChanged(); return this; } private boolean canceled_ ; /** * required bool canceled = 3; */ public boolean hasCanceled() { return ((bitField0_ & 0x00000004) != 0); } /** * required bool canceled = 3; */ public boolean getCanceled() { return canceled_; } /** * required bool canceled = 3; */ public Builder setCanceled(boolean value) { bitField0_ |= 0x00000004; 
canceled_ = value; onChanged(); return this; } /** * required bool canceled = 3; */ public Builder clearCanceled() { bitField0_ = (bitField0_ & ~0x00000004); canceled_ = false; onChanged(); return this; } private long numReencrypted_ ; /** * required int64 numReencrypted = 4; */ public boolean hasNumReencrypted() { return ((bitField0_ & 0x00000008) != 0); } /** * required int64 numReencrypted = 4; */ public long getNumReencrypted() { return numReencrypted_; } /** * required int64 numReencrypted = 4; */ public Builder setNumReencrypted(long value) { bitField0_ |= 0x00000008; numReencrypted_ = value; onChanged(); return this; } /** * required int64 numReencrypted = 4; */ public Builder clearNumReencrypted() { bitField0_ = (bitField0_ & ~0x00000008); numReencrypted_ = 0L; onChanged(); return this; } private long numFailures_ ; /** * required int64 numFailures = 5; */ public boolean hasNumFailures() { return ((bitField0_ & 0x00000010) != 0); } /** * required int64 numFailures = 5; */ public long getNumFailures() { return numFailures_; } /** * required int64 numFailures = 5; */ public Builder setNumFailures(long value) { bitField0_ |= 0x00000010; numFailures_ = value; onChanged(); return this; } /** * required int64 numFailures = 5; */ public Builder clearNumFailures() { bitField0_ = (bitField0_ & ~0x00000010); numFailures_ = 0L; onChanged(); return this; } private long completionTime_ ; /** * optional uint64 completionTime = 6; */ public boolean hasCompletionTime() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 completionTime = 6; */ public long getCompletionTime() { return completionTime_; } /** * optional uint64 completionTime = 6; */ public Builder setCompletionTime(long value) { bitField0_ |= 0x00000020; completionTime_ = value; onChanged(); return this; } /** * optional uint64 completionTime = 6; */ public Builder clearCompletionTime() { bitField0_ = (bitField0_ & ~0x00000020); completionTime_ = 0L; onChanged(); return this; } private 
java.lang.Object lastFile_ = ""; /** * optional string lastFile = 7; */ public boolean hasLastFile() { return ((bitField0_ & 0x00000040) != 0); } /** * optional string lastFile = 7; */ public java.lang.String getLastFile() { java.lang.Object ref = lastFile_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { lastFile_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string lastFile = 7; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getLastFileBytes() { java.lang.Object ref = lastFile_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); lastFile_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string lastFile = 7; */ public Builder setLastFile( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000040; lastFile_ = value; onChanged(); return this; } /** * optional string lastFile = 7; */ public Builder clearLastFile() { bitField0_ = (bitField0_ & ~0x00000040); lastFile_ = getDefaultInstance().getLastFile(); onChanged(); return this; } /** * optional string lastFile = 7; */ public Builder setLastFileBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000040; lastFile_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return 
super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReencryptionInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ReencryptionInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ReencryptionInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new ReencryptionInfoProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface CipherOptionProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.CipherOptionProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ boolean hasSuite(); /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite(); /** * optional bytes inKey = 2; */ boolean hasInKey(); /** * optional bytes inKey = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getInKey(); /** * optional bytes inIv = 3; */ 
boolean hasInIv(); /** * optional bytes inIv = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getInIv(); /** * optional bytes outKey = 4; */ boolean hasOutKey(); /** * optional bytes outKey = 4; */ org.apache.hadoop.thirdparty.protobuf.ByteString getOutKey(); /** * optional bytes outIv = 5; */ boolean hasOutIv(); /** * optional bytes outIv = 5; */ org.apache.hadoop.thirdparty.protobuf.ByteString getOutIv(); } /** *
   **
   * Cipher option
   * 
* * Protobuf type {@code hadoop.hdfs.CipherOptionProto} */ public static final class CipherOptionProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.CipherOptionProto) CipherOptionProtoOrBuilder { private static final long serialVersionUID = 0L; // Use CipherOptionProto.newBuilder() to construct. private CipherOptionProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private CipherOptionProto() { suite_ = 1; inKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; inIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; outKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; outIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CipherOptionProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; suite_ = rawValue; } break; } case 18: { bitField0_ |= 0x00000002; inKey_ = input.readBytes(); 
break; } case 26: { bitField0_ |= 0x00000004; inIv_ = input.readBytes(); break; } case 34: { bitField0_ |= 0x00000008; outKey_ = input.readBytes(); break; } case 42: { bitField0_ |= 0x00000010; outIv_ = input.readBytes(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder.class); } private int bitField0_; public static final int SUITE_FIELD_NUMBER = 1; private int suite_; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public boolean hasSuite() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(suite_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result; } public static final int INKEY_FIELD_NUMBER = 2; private org.apache.hadoop.thirdparty.protobuf.ByteString inKey_; /** * optional bytes inKey = 2; */ public boolean hasInKey() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bytes inKey = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getInKey() { return inKey_; } public static final int INIV_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString inIv_; /** * optional bytes inIv = 3; */ public boolean hasInIv() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bytes inIv = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getInIv() { return inIv_; } public static final int OUTKEY_FIELD_NUMBER = 4; private org.apache.hadoop.thirdparty.protobuf.ByteString outKey_; /** * optional bytes outKey = 4; */ public boolean hasOutKey() { return ((bitField0_ & 0x00000008) != 0); } /** * optional bytes outKey = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getOutKey() { return outKey_; } public static final int OUTIV_FIELD_NUMBER = 5; private org.apache.hadoop.thirdparty.protobuf.ByteString outIv_; /** * optional bytes outIv = 5; */ public boolean hasOutIv() { return ((bitField0_ & 0x00000010) != 0); } /** * optional bytes outIv = 5; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getOutIv() { return outIv_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSuite()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, suite_); } if (((bitField0_ & 
0x00000002) != 0)) { output.writeBytes(2, inKey_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBytes(3, inIv_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBytes(4, outKey_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeBytes(5, outIv_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, suite_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(2, inKey_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, inIv_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(4, outKey_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(5, outIv_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) obj; if (hasSuite() != other.hasSuite()) return false; if (hasSuite()) { if (suite_ != other.suite_) return false; } if (hasInKey() != other.hasInKey()) return false; if (hasInKey()) { if (!getInKey() .equals(other.getInKey())) return false; } if (hasInIv() != other.hasInIv()) return false; if (hasInIv()) { if (!getInIv() .equals(other.getInIv())) return false; } if (hasOutKey() != other.hasOutKey()) return false; if (hasOutKey()) { if 
(!getOutKey() .equals(other.getOutKey())) return false; } if (hasOutIv() != other.hasOutIv()) return false; if (hasOutIv()) { if (!getOutIv() .equals(other.getOutIv())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSuite()) { hash = (37 * hash) + SUITE_FIELD_NUMBER; hash = (53 * hash) + suite_; } if (hasInKey()) { hash = (37 * hash) + INKEY_FIELD_NUMBER; hash = (53 * hash) + getInKey().hashCode(); } if (hasInIv()) { hash = (37 * hash) + INIV_FIELD_NUMBER; hash = (53 * hash) + getInIv().hashCode(); } if (hasOutKey()) { hash = (37 * hash) + OUTKEY_FIELD_NUMBER; hash = (53 * hash) + getOutKey().hashCode(); } if (hasOutIv()) { hash = (37 * hash) + OUTIV_FIELD_NUMBER; hash = (53 * hash) + getOutIv().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Cipher option
     * 
* * Protobuf type {@code hadoop.hdfs.CipherOptionProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.CipherOptionProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); suite_ = 1; bitField0_ = (bitField0_ & ~0x00000001); inKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); inIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); outKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000008); outIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000010); return this; 
}
// NOTE(review): protoc-generated code ("DO NOT EDIT" per file header) that was
// flattened by HTML extraction; generic type parameters (e.g.
// Parser<CipherOptionProto>) appear to have been stripped by the scrape --
// confirm against regenerated source. Comments below are descriptive only; no
// code tokens were changed, lines were only re-wrapped.
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    getDescriptorForType() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getDefaultInstanceForType() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance();
}
// Builds the message; throws if a required field (only 'suite', see
// isInitialized) is unset.
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto build() {
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result = buildPartial();
  if (!result.isInitialized()) {
    throw newUninitializedMessageException(result);
  }
  return result;
}
// Copies builder state (suite/inKey/inIv/outKey/outIv plus the has-bits in
// bitField0_) into a new message without checking required fields.
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto buildPartial() {
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto(this);
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
  if (((from_bitField0_ & 0x00000001) != 0)) {
    to_bitField0_ |= 0x00000001;
  }
  result.suite_ = suite_;
  if (((from_bitField0_ & 0x00000002) != 0)) {
    to_bitField0_ |= 0x00000002;
  }
  result.inKey_ = inKey_;
  if (((from_bitField0_ & 0x00000004) != 0)) {
    to_bitField0_ |= 0x00000004;
  }
  result.inIv_ = inIv_;
  if (((from_bitField0_ & 0x00000008) != 0)) {
    to_bitField0_ |= 0x00000008;
  }
  result.outKey_ = outKey_;
  if (((from_bitField0_ & 0x00000010) != 0)) {
    to_bitField0_ |= 0x00000010;
  }
  result.outIv_ = outIv_;
  result.bitField0_ = to_bitField0_;
  onBuilt();
  return result;
}
@java.lang.Override
public Builder clone() {
  return super.clone();
}
// Reflection-based mutators: all delegate to GeneratedMessageV3.Builder.
@java.lang.Override
public Builder setField(
    org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
    java.lang.Object value) {
  return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
    org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor
    field) {
  return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
    org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
  return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
    org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
    int index, java.lang.Object value) {
  return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
    org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
    java.lang.Object value) {
  return super.addRepeatedField(field, value);
}
// Generic merge: dispatches to the typed overload when 'other' is a
// CipherOptionProto.
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
  if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) {
    return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto)other);
  } else {
    super.mergeFrom(other);
    return this;
  }
}
// Field-by-field merge: each field set in 'other' overwrites this builder's
// value; unknown fields are merged too.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto other) {
  if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance()) return this;
  if (other.hasSuite()) {
    setSuite(other.getSuite());
  }
  if (other.hasInKey()) {
    setInKey(other.getInKey());
  }
  if (other.hasInIv()) {
    setInIv(other.getInIv());
  }
  if (other.hasOutKey()) {
    setOutKey(other.getOutKey());
  }
  if (other.hasOutIv()) {
    setOutIv(other.getOutIv());
  }
  this.mergeUnknownFields(other.unknownFields);
  onChanged();
  return this;
}
// Only 'suite' is required by the .proto definition.
@java.lang.Override
public final boolean isInitialized() {
  if (!hasSuite()) {
    return false;
  }
  return true;
}
// Stream merge: on a parse error the partially-read message (if any) is still
// merged in via the finally block before the exception propagates.
@java.lang.Override
public Builder mergeFrom(
    org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
    org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parsedMessage = null;
  try {
    parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
  }
  catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
    parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) e.getUnfinishedMessage();
    throw e.unwrapIOException();
  } finally {
    if (parsedMessage != null) {
      mergeFrom(parsedMessage);
    }
  }
  return this;
}
// Has-bits for the five fields (0x01=suite .. 0x10=outIv).
private int bitField0_;
// Enum field stored by wire number; default is 1.
private int suite_ = 1;
/**
 * required .hadoop.hdfs.CipherSuiteProto suite = 1;
 */
public boolean hasSuite() {
  return ((bitField0_ & 0x00000001) != 0);
}
/**
 * required .hadoop.hdfs.CipherSuiteProto suite = 1;
 * Returns UNKNOWN when the stored number maps to no known enum value.
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
  @SuppressWarnings("deprecation")
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(suite_);
  return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result;
}
/**
 * required .hadoop.hdfs.CipherSuiteProto suite = 1;
 */
public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
  suite_ = value.getNumber();
  onChanged();
  return this;
}
/**
 * required .hadoop.hdfs.CipherSuiteProto suite = 1;
 */
public Builder clearSuite() {
  bitField0_ = (bitField0_ & ~0x00000001);
  suite_ = 1;
  onChanged();
  return this;
}
private org.apache.hadoop.thirdparty.protobuf.ByteString inKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
 * optional bytes inKey = 2;
 */
public boolean hasInKey() {
  return ((bitField0_ & 0x00000002) != 0);
}
/**
 * optional bytes inKey = 2;
 */
public org.apache.hadoop.thirdparty.protobuf.ByteString getInKey() {
  return inKey_;
}
/**
 * optional bytes inKey = 2;
 */
public Builder setInKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000002;
  inKey_ = value;
  onChanged();
  return this;
}
/**
 * optional bytes inKey = 2;
 */
public Builder clearInKey() {
  bitField0_ = (bitField0_ & ~0x00000002);
  inKey_ = getDefaultInstance().getInKey();
  onChanged();
  return this;
}
private org.apache.hadoop.thirdparty.protobuf.ByteString inIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
 * optional bytes inIv = 3;
 */
public boolean hasInIv() {
  return ((bitField0_ & 0x00000004) != 0);
}
/**
 * optional bytes inIv = 3;
 */
public org.apache.hadoop.thirdparty.protobuf.ByteString getInIv() {
  return inIv_;
}
/**
 * optional bytes inIv = 3;
 */
public Builder setInIv(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000004;
  inIv_ = value;
  onChanged();
  return this;
}
/**
 * optional bytes inIv = 3;
 */
public Builder clearInIv() {
  bitField0_ = (bitField0_ & ~0x00000004);
  inIv_ = getDefaultInstance().getInIv();
  onChanged();
  return this;
}
private org.apache.hadoop.thirdparty.protobuf.ByteString outKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
 * optional bytes outKey = 4;
 */
public boolean hasOutKey() {
  return ((bitField0_ & 0x00000008) != 0);
}
/**
 * optional bytes outKey = 4;
 */
public org.apache.hadoop.thirdparty.protobuf.ByteString getOutKey() {
  return outKey_;
}
/**
 * optional bytes outKey = 4;
 */
public Builder setOutKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000008;
  outKey_ = value;
  onChanged();
  return this;
}
/**
 * optional bytes outKey = 4;
 */
public Builder clearOutKey() {
  bitField0_ = (bitField0_ & ~0x00000008);
  outKey_ = getDefaultInstance().getOutKey();
  onChanged();
  return this;
}
private org.apache.hadoop.thirdparty.protobuf.ByteString outIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
 * optional bytes outIv = 5;
 */
public boolean hasOutIv() {
  return ((bitField0_ & 0x00000010) != 0);
}
/**
 * optional bytes outIv = 5;
 */
public
org.apache.hadoop.thirdparty.protobuf.ByteString getOutIv() {
  return outIv_;
}
/**
 * optional bytes outIv = 5;
 */
public Builder setOutIv(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000010;
  outIv_ = value;
  onChanged();
  return this;
}
/**
 * optional bytes outIv = 5;
 */
public Builder clearOutIv() {
  bitField0_ = (bitField0_ & ~0x00000010);
  outIv_ = getDefaultInstance().getOutIv();
  onChanged();
  return this;
}
@java.lang.Override
public final Builder setUnknownFields(
    final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
  return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
    final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
  return super.mergeUnknownFields(unknownFields);
}

// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CipherOptionProto)
}

// @@protoc_insertion_point(class_scope:hadoop.hdfs.CipherOptionProto)
// Singleton default instance; also backs getDefaultInstanceForType().
private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto DEFAULT_INSTANCE;
static {
  DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getDefaultInstance() {
  return DEFAULT_INSTANCE;
}
// Deprecated public parser field; prefer parser(). Delegates to the
// stream-parsing constructor.
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser
    PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() {
  @java.lang.Override
  public CipherOptionProto parsePartialFrom(
      org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    return new CipherOptionProto(input, extensionRegistry);
  }
};
public static org.apache.hadoop.thirdparty.protobuf.Parser parser() {
  return PARSER;
}
@java.lang.Override
public
org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() {
  return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}

}

/**
 * Read-only accessor contract for {@code hadoop.hdfs.LocatedBlocksProto},
 * implemented by both the message and its builder.
 *
 * NOTE(review): the raw {@code java.util.List} return types below look like
 * extraction artifacts -- the generated source declares
 * {@code List<LocatedBlockProto>} / {@code List<? extends
 * LocatedBlockProtoOrBuilder>}; confirm against regenerated code. No code
 * tokens were changed here, lines were only re-wrapped.
 */
public interface LocatedBlocksProtoOrBuilder extends
    // @@protoc_insertion_point(interface_extends:hadoop.hdfs.LocatedBlocksProto)
    org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

  /**
   * required uint64 fileLength = 1;
   */
  boolean hasFileLength();
  /**
   * required uint64 fileLength = 1;
   */
  long getFileLength();

  /**
   * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
   */
  java.util.List getBlocksList();
  /**
   * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index);
  /**
   * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
   */
  int getBlocksCount();
  /**
   * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
   */
  java.util.List getBlocksOrBuilderList();
  /**
   * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
      int index);

  /**
   * required bool underConstruction = 3;
   */
  boolean hasUnderConstruction();
  /**
   * required bool underConstruction = 3;
   */
  boolean getUnderConstruction();

  /**
   * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
   */
  boolean hasLastBlock();
  /**
   * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock();
  /**
   * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder();

  /**
   * required bool isLastBlockComplete = 5;
   */
  boolean hasIsLastBlockComplete();
  /**
   * required bool isLastBlockComplete = 5;
   */
  boolean getIsLastBlockComplete();

  /**
   * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
   */
  boolean hasFileEncryptionInfo();
  /**
   * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo();
  /**
   * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder();

  /**
   * Optional field for erasure coding
   *
   * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
   */
  boolean hasEcPolicy();
  /**
   * Optional field for erasure coding
   *
   * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy();
  /**
   * Optional field for erasure coding
   *
   * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder();
}
/** *
   **
   * A set of file blocks and their locations.
   * 
* * Protobuf type {@code hadoop.hdfs.LocatedBlocksProto} */ public static final class LocatedBlocksProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.LocatedBlocksProto) LocatedBlocksProtoOrBuilder { private static final long serialVersionUID = 0L; // Use LocatedBlocksProto.newBuilder() to construct. private LocatedBlocksProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private LocatedBlocksProto() { blocks_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private LocatedBlocksProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; fileLength_ = input.readUInt64(); break; } case 18: { if (!((mutable_bitField0_ & 0x00000002) != 0)) { blocks_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000002; } blocks_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry)); break; } case 24: { bitField0_ |= 0x00000002; underConstruction_ = input.readBool(); break; } case 34: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) != 0)) { subBuilder = lastBlock_.toBuilder(); } lastBlock_ = 
input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(lastBlock_); lastBlock_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000004; break; } case 40: { bitField0_ |= 0x00000008; isLastBlockComplete_ = input.readBool(); break; } case 50: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000010) != 0)) { subBuilder = fileEncryptionInfo_.toBuilder(); } fileEncryptionInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(fileEncryptionInfo_); fileEncryptionInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000010; break; } case 58: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder subBuilder = null; if (((bitField0_ & 0x00000020) != 0)) { subBuilder = ecPolicy_.toBuilder(); } ecPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(ecPolicy_); ecPolicy_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000020; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000002) != 0)) { blocks_ = java.util.Collections.unmodifiableList(blocks_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class); } private int bitField0_; public static final int FILELENGTH_FIELD_NUMBER = 1; private long fileLength_; /** * required uint64 fileLength = 1; */ public boolean hasFileLength() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 fileLength = 1; */ public long getFileLength() { return fileLength_; } public static final int BLOCKS_FIELD_NUMBER = 2; private java.util.List blocks_; /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public java.util.List getBlocksList() { return blocks_; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public java.util.List getBlocksOrBuilderList() { return blocks_; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public int getBlocksCount() { return blocks_.size(); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { return blocks_.get(index); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( int index) { return blocks_.get(index); } public static final int UNDERCONSTRUCTION_FIELD_NUMBER = 3; private boolean underConstruction_; /** * required bool underConstruction = 3; */ public boolean hasUnderConstruction() { return ((bitField0_ & 0x00000002) != 0); } /** * required bool underConstruction = 3; */ public 
boolean getUnderConstruction() { return underConstruction_; } public static final int LASTBLOCK_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_; /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public boolean hasLastBlock() { return ((bitField0_ & 0x00000004) != 0); } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() { return lastBlock_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance() : lastBlock_; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() { return lastBlock_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance() : lastBlock_; } public static final int ISLASTBLOCKCOMPLETE_FIELD_NUMBER = 5; private boolean isLastBlockComplete_; /** * required bool isLastBlockComplete = 5; */ public boolean hasIsLastBlockComplete() { return ((bitField0_ & 0x00000008) != 0); } /** * required bool isLastBlockComplete = 5; */ public boolean getIsLastBlockComplete() { return isLastBlockComplete_; } public static final int FILEENCRYPTIONINFO_FIELD_NUMBER = 6; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_; /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public boolean hasFileEncryptionInfo() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() { return fileEncryptionInfo_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() { return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } public static final int ECPOLICY_FIELD_NUMBER = 7; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_; /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public boolean hasEcPolicy() { return ((bitField0_ & 0x00000020) != 0); } /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasFileLength()) { memoizedIsInitialized = 0; return false; } if (!hasUnderConstruction()) { memoizedIsInitialized = 0; return false; } if (!hasIsLastBlockComplete()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasLastBlock()) { if (!getLastBlock().isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasEcPolicy()) { if (!getEcPolicy().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, fileLength_); } for (int i = 0; i < blocks_.size(); i++) { output.writeMessage(2, blocks_.get(i)); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBool(3, underConstruction_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeMessage(4, getLastBlock()); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBool(5, isLastBlockComplete_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeMessage(6, getFileEncryptionInfo()); } if (((bitField0_ & 0x00000020) != 0)) { output.writeMessage(7, getEcPolicy()); } 
unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, fileLength_); } for (int i = 0; i < blocks_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(2, blocks_.get(i)); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(3, underConstruction_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, getLastBlock()); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(5, isLastBlockComplete_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(6, getFileEncryptionInfo()); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(7, getEcPolicy()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) obj; if (hasFileLength() != other.hasFileLength()) return false; if (hasFileLength()) { if (getFileLength() != other.getFileLength()) return false; } if (!getBlocksList() .equals(other.getBlocksList())) return false; if (hasUnderConstruction() != other.hasUnderConstruction()) return false; if (hasUnderConstruction()) { if (getUnderConstruction() != 
other.getUnderConstruction()) return false; } if (hasLastBlock() != other.hasLastBlock()) return false; if (hasLastBlock()) { if (!getLastBlock() .equals(other.getLastBlock())) return false; } if (hasIsLastBlockComplete() != other.hasIsLastBlockComplete()) return false; if (hasIsLastBlockComplete()) { if (getIsLastBlockComplete() != other.getIsLastBlockComplete()) return false; } if (hasFileEncryptionInfo() != other.hasFileEncryptionInfo()) return false; if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo() .equals(other.getFileEncryptionInfo())) return false; } if (hasEcPolicy() != other.hasEcPolicy()) return false; if (hasEcPolicy()) { if (!getEcPolicy() .equals(other.getEcPolicy())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasFileLength()) { hash = (37 * hash) + FILELENGTH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFileLength()); } if (getBlocksCount() > 0) { hash = (37 * hash) + BLOCKS_FIELD_NUMBER; hash = (53 * hash) + getBlocksList().hashCode(); } if (hasUnderConstruction()) { hash = (37 * hash) + UNDERCONSTRUCTION_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getUnderConstruction()); } if (hasLastBlock()) { hash = (37 * hash) + LASTBLOCK_FIELD_NUMBER; hash = (53 * hash) + getLastBlock().hashCode(); } if (hasIsLastBlockComplete()) { hash = (37 * hash) + ISLASTBLOCKCOMPLETE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getIsLastBlockComplete()); } if (hasFileEncryptionInfo()) { hash = (37 * hash) + FILEENCRYPTIONINFO_FIELD_NUMBER; hash = (53 * hash) + getFileEncryptionInfo().hashCode(); } if (hasEcPolicy()) { hash = (37 * hash) + ECPOLICY_FIELD_NUMBER; hash = (53 * hash) + 
getEcPolicy().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(java.io.InputStream input) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * A set of file blocks and their locations.
     * 
* * Protobuf type {@code hadoop.hdfs.LocatedBlocksProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.LocatedBlocksProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getBlocksFieldBuilder(); getLastBlockFieldBuilder(); getFileEncryptionInfoFieldBuilder(); getEcPolicyFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); fileLength_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); if (blocksBuilder_ == null) { blocks_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); } else { blocksBuilder_.clear(); } underConstruction_ = false; bitField0_ = (bitField0_ & ~0x00000004); if (lastBlockBuilder_ == null) { lastBlock_ = null; } else { 
lastBlockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); isLastBlockComplete_ = false; bitField0_ = (bitField0_ & ~0x00000010); if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = null; } else { fileEncryptionInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000020); if (ecPolicyBuilder_ == null) { ecPolicy_ = null; } else { ecPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.fileLength_ = fileLength_; to_bitField0_ |= 0x00000001; } if (blocksBuilder_ == null) { if (((bitField0_ & 0x00000002) != 0)) { blocks_ = java.util.Collections.unmodifiableList(blocks_); bitField0_ = (bitField0_ & ~0x00000002); } result.blocks_ = blocks_; } else { result.blocks_ = blocksBuilder_.build(); } if (((from_bitField0_ & 0x00000004) != 0)) { result.underConstruction_ = underConstruction_; to_bitField0_ |= 0x00000002; 
} if (((from_bitField0_ & 0x00000008) != 0)) { if (lastBlockBuilder_ == null) { result.lastBlock_ = lastBlock_; } else { result.lastBlock_ = lastBlockBuilder_.build(); } to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000010) != 0)) { result.isLastBlockComplete_ = isLastBlockComplete_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000020) != 0)) { if (fileEncryptionInfoBuilder_ == null) { result.fileEncryptionInfo_ = fileEncryptionInfo_; } else { result.fileEncryptionInfo_ = fileEncryptionInfoBuilder_.build(); } to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000040) != 0)) { if (ecPolicyBuilder_ == null) { result.ecPolicy_ = ecPolicy_; } else { result.ecPolicy_ = ecPolicyBuilder_.build(); } to_bitField0_ |= 0x00000020; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) 
{ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) return this; if (other.hasFileLength()) { setFileLength(other.getFileLength()); } if (blocksBuilder_ == null) { if (!other.blocks_.isEmpty()) { if (blocks_.isEmpty()) { blocks_ = other.blocks_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureBlocksIsMutable(); blocks_.addAll(other.blocks_); } onChanged(); } } else { if (!other.blocks_.isEmpty()) { if (blocksBuilder_.isEmpty()) { blocksBuilder_.dispose(); blocksBuilder_ = null; blocks_ = other.blocks_; bitField0_ = (bitField0_ & ~0x00000002); blocksBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getBlocksFieldBuilder() : null; } else { blocksBuilder_.addAllMessages(other.blocks_); } } } if (other.hasUnderConstruction()) { setUnderConstruction(other.getUnderConstruction()); } if (other.hasLastBlock()) { mergeLastBlock(other.getLastBlock()); } if (other.hasIsLastBlockComplete()) { setIsLastBlockComplete(other.getIsLastBlockComplete()); } if (other.hasFileEncryptionInfo()) { mergeFileEncryptionInfo(other.getFileEncryptionInfo()); } if (other.hasEcPolicy()) { mergeEcPolicy(other.getEcPolicy()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasFileLength()) { return false; } if (!hasUnderConstruction()) { return false; } if (!hasIsLastBlockComplete()) { return false; } for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { return false; } } if (hasLastBlock()) { if (!getLastBlock().isInitialized()) { return false; } } if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo().isInitialized()) { return 
false; } } if (hasEcPolicy()) { if (!getEcPolicy().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long fileLength_ ; /** * required uint64 fileLength = 1; */ public boolean hasFileLength() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 fileLength = 1; */ public long getFileLength() { return fileLength_; } /** * required uint64 fileLength = 1; */ public Builder setFileLength(long value) { bitField0_ |= 0x00000001; fileLength_ = value; onChanged(); return this; } /** * required uint64 fileLength = 1; */ public Builder clearFileLength() { bitField0_ = (bitField0_ & ~0x00000001); fileLength_ = 0L; onChanged(); return this; } private java.util.List blocks_ = java.util.Collections.emptyList(); private void ensureBlocksIsMutable() { if (!((bitField0_ & 0x00000002) != 0)) { blocks_ = new java.util.ArrayList(blocks_); bitField0_ |= 0x00000002; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blocksBuilder_; /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public java.util.List 
getBlocksList() { if (blocksBuilder_ == null) { return java.util.Collections.unmodifiableList(blocks_); } else { return blocksBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public int getBlocksCount() { if (blocksBuilder_ == null) { return blocks_.size(); } else { return blocksBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { if (blocksBuilder_ == null) { return blocks_.get(index); } else { return blocksBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder setBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.set(index, value); onChanged(); } else { blocksBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder setBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.set(index, builderForValue.build()); onChanged(); } else { blocksBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.add(value); onChanged(); } else { blocksBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw 
new NullPointerException(); } ensureBlocksIsMutable(); blocks_.add(index, value); onChanged(); } else { blocksBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addBlocks( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.add(builderForValue.build()); onChanged(); } else { blocksBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.add(index, builderForValue.build()); onChanged(); } else { blocksBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addAllBlocks( java.lang.Iterable values) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, blocks_); onChanged(); } else { blocksBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder clearBlocks() { if (blocksBuilder_ == null) { blocks_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); } else { blocksBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder removeBlocks(int index) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.remove(index); onChanged(); } else { blocksBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder( int index) { return 
getBlocksFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( int index) { if (blocksBuilder_ == null) { return blocks_.get(index); } else { return blocksBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public java.util.List getBlocksOrBuilderList() { if (blocksBuilder_ != null) { return blocksBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(blocks_); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() { return getBlocksFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder( int index) { return getBlocksFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public java.util.List getBlocksBuilderList() { return getBlocksFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> getBlocksFieldBuilder() { if (blocksBuilder_ == null) { blocksBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( blocks_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); blocks_ = null; } return blocksBuilder_; } private boolean underConstruction_ ; /** * required bool underConstruction = 3; */ public boolean hasUnderConstruction() { return ((bitField0_ & 0x00000004) != 0); } /** * required bool underConstruction = 3; */ public boolean getUnderConstruction() { return underConstruction_; } /** * required bool underConstruction = 3; */ public Builder setUnderConstruction(boolean value) { bitField0_ |= 0x00000004; underConstruction_ = value; onChanged(); return this; } /** * required bool underConstruction = 3; */ public Builder clearUnderConstruction() { bitField0_ = (bitField0_ & ~0x00000004); underConstruction_ = false; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> lastBlockBuilder_; /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public boolean hasLastBlock() { return ((bitField0_ & 0x00000008) != 0); } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() { if (lastBlockBuilder_ == null) { return lastBlock_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance() : lastBlock_; } else { return lastBlockBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public Builder setLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (lastBlockBuilder_ == null) { if (value == null) { throw new NullPointerException(); } lastBlock_ = value; onChanged(); } else { lastBlockBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public Builder setLastBlock( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (lastBlockBuilder_ == null) { lastBlock_ = builderForValue.build(); onChanged(); } else { lastBlockBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public Builder mergeLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (lastBlockBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0) && lastBlock_ != null && lastBlock_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) { lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(lastBlock_).mergeFrom(value).buildPartial(); } else { lastBlock_ = value; } onChanged(); } else { lastBlockBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public Builder clearLastBlock() { if (lastBlockBuilder_ == null) { lastBlock_ = null; onChanged(); } else { lastBlockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getLastBlockBuilder() { bitField0_ |= 
0x00000008; onChanged(); return getLastBlockFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() { if (lastBlockBuilder_ != null) { return lastBlockBuilder_.getMessageOrBuilder(); } else { return lastBlock_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance() : lastBlock_; } } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> getLastBlockFieldBuilder() { if (lastBlockBuilder_ == null) { lastBlockBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( getLastBlock(), getParentForChildren(), isClean()); lastBlock_ = null; } return lastBlockBuilder_; } private boolean isLastBlockComplete_ ; /** * required bool isLastBlockComplete = 5; */ public boolean hasIsLastBlockComplete() { return ((bitField0_ & 0x00000010) != 0); } /** * required bool isLastBlockComplete = 5; */ public boolean getIsLastBlockComplete() { return isLastBlockComplete_; } /** * required bool isLastBlockComplete = 5; */ public Builder setIsLastBlockComplete(boolean value) { bitField0_ |= 0x00000010; isLastBlockComplete_ = value; onChanged(); return this; } /** * required bool isLastBlockComplete = 5; */ public Builder clearIsLastBlockComplete() { bitField0_ = (bitField0_ & ~0x00000010); isLastBlockComplete_ = false; onChanged(); return this; } private 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> fileEncryptionInfoBuilder_; /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public boolean hasFileEncryptionInfo() { return ((bitField0_ & 0x00000020) != 0); } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() { if (fileEncryptionInfoBuilder_ == null) { return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } else { return fileEncryptionInfoBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public Builder setFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) { if (fileEncryptionInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } fileEncryptionInfo_ = value; onChanged(); } else { fileEncryptionInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000020; return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public Builder setFileEncryptionInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder builderForValue) { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = builderForValue.build(); onChanged(); } else { fileEncryptionInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000020; return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public Builder 
mergeFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) { if (fileEncryptionInfoBuilder_ == null) { if (((bitField0_ & 0x00000020) != 0) && fileEncryptionInfo_ != null && fileEncryptionInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) { fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder(fileEncryptionInfo_).mergeFrom(value).buildPartial(); } else { fileEncryptionInfo_ = value; } onChanged(); } else { fileEncryptionInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000020; return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public Builder clearFileEncryptionInfo() { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = null; onChanged(); } else { fileEncryptionInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000020); return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder getFileEncryptionInfoBuilder() { bitField0_ |= 0x00000020; onChanged(); return getFileEncryptionInfoFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() { if (fileEncryptionInfoBuilder_ != null) { return fileEncryptionInfoBuilder_.getMessageOrBuilder(); } else { return fileEncryptionInfo_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> getFileEncryptionInfoFieldBuilder() { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>( getFileEncryptionInfo(), getParentForChildren(), isClean()); fileEncryptionInfo_ = null; } return fileEncryptionInfoBuilder_; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> ecPolicyBuilder_; /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public boolean hasEcPolicy() { return ((bitField0_ & 0x00000040) != 0); } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() { if (ecPolicyBuilder_ == null) { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } else { return ecPolicyBuilder_.getMessage(); } } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public Builder setEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (ecPolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ecPolicy_ = value; onChanged(); } else { ecPolicyBuilder_.setMessage(value); } bitField0_ |= 0x00000040; return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public Builder setEcPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) { if (ecPolicyBuilder_ == null) { ecPolicy_ = builderForValue.build(); onChanged(); } else { ecPolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000040; return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public Builder mergeEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (ecPolicyBuilder_ == null) { if (((bitField0_ & 0x00000040) != 0) && ecPolicy_ != null && ecPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) { ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.newBuilder(ecPolicy_).mergeFrom(value).buildPartial(); } else { ecPolicy_ = value; } onChanged(); } else { ecPolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000040; return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public Builder clearEcPolicy() { if (ecPolicyBuilder_ == null) { ecPolicy_ = null; onChanged(); } else { ecPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getEcPolicyBuilder() { bitField0_ |= 0x00000040; onChanged(); return getEcPolicyFieldBuilder().getBuilder(); } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() { if (ecPolicyBuilder_ != null) { return ecPolicyBuilder_.getMessageOrBuilder(); } else { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> getEcPolicyFieldBuilder() { if (ecPolicyBuilder_ == null) { ecPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>( getEcPolicy(), getParentForChildren(), isClean()); ecPolicy_ = null; } return ecPolicyBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.LocatedBlocksProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.LocatedBlocksProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public LocatedBlocksProto parsePartialFrom( 
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new LocatedBlocksProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ECSchemaOptionEntryProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ECSchemaOptionEntryProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string key = 1; */ boolean hasKey(); /** * required string key = 1; */ java.lang.String getKey(); /** * required string key = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getKeyBytes(); /** * required string value = 2; */ boolean hasValue(); /** * required string value = 2; */ java.lang.String getValue(); /** * required string value = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getValueBytes(); } /** *
   **
   * ECSchema options entry
   * 
 *
 * Protobuf type {@code hadoop.hdfs.ECSchemaOptionEntryProto}
 */
// NOTE(review): generated by protoc (see file header) — do not hand-modify logic.
// Generic type parameters (e.g. Builder<?>) appear to have been stripped by the
// HTML extraction of this listing; confirm against the original generated file.
public static final class ECSchemaOptionEntryProto extends
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
    // @@protoc_insertion_point(message_implements:hadoop.hdfs.ECSchemaOptionEntryProto)
    ECSchemaOptionEntryProtoOrBuilder {
private static final long serialVersionUID = 0L;
  // Use ECSchemaOptionEntryProto.newBuilder() to construct.
  private ECSchemaOptionEntryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) {
    super(builder);
  }
  // Default instance: both required string fields start as empty strings.
  private ECSchemaOptionEntryProto() {
    key_ = "";
    value_ = "";
  }
  @java.lang.Override
  public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
  getUnknownFields() {
    return this.unknownFields;
  }
  // Parsing constructor: reads (tag, payload) pairs until end of stream (tag 0),
  // setting presence bits in bitField0_ and preserving unrecognized fields.
  private ECSchemaOptionEntryProto(
      org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    this();
    if (extensionRegistry == null) {
      throw new java.lang.NullPointerException();
    }
    int mutable_bitField0_ = 0;
    org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
        org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
          case 10: {
            // Field 1 (key), wire type 2: raw bytes are kept and decoded lazily in getKey().
            org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
            bitField0_ |= 0x00000001;
            key_ = bs;
            break;
          }
          case 18: {
            // Field 2 (value), wire type 2.
            org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
            bitField0_ |= 0x00000002;
            value_ = bs;
            break;
          }
          default: {
            if (!parseUnknownField(
                input, unknownFields, extensionRegistry, tag)) {
              done = true;
            }
            break;
          }
        }
      }
    } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(this);
    } catch (java.io.IOException e) {
      throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
          e).setUnfinishedMessage(this);
    } finally {
      // Always attach whatever was parsed so far, even when parsing failed.
      this.unknownFields = unknownFields.build();
      makeExtensionsImmutable();
    }
  }
  public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor;
  }
  @java.lang.Override
  protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder.class);
  }
  // Presence bits: 0x1 = key, 0x2 = value.
  private int bitField0_;
  public static final int KEY_FIELD_NUMBER = 1;
  // Holds either a java.lang.String or a ByteString (decoded lazily, cached if valid UTF-8).
  private volatile java.lang.Object key_;
  /**
   * required string key = 1;
   */
  public boolean hasKey() {
    return ((bitField0_ & 0x00000001) != 0);
  }
  /**
   * required string key = 1;
   */
  public java.lang.String getKey() {
    java.lang.Object ref = key_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      org.apache.hadoop.thirdparty.protobuf.ByteString bs =
          (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      if (bs.isValidUtf8()) {
        // Cache the decoded form only when the bytes are valid UTF-8.
        key_ = s;
      }
      return s;
    }
  }
  /**
   * required string key = 1;
   */
  public org.apache.hadoop.thirdparty.protobuf.ByteString
      getKeyBytes() {
    java.lang.Object ref = key_;
    if (ref instanceof java.lang.String) {
      org.apache.hadoop.thirdparty.protobuf.ByteString b =
          org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
              (java.lang.String) ref);
      key_ = b;
      return b;
    } else {
      return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
    }
  }
  public static final int VALUE_FIELD_NUMBER = 2;
  // Same lazy String/ByteString representation as key_.
  private volatile java.lang.Object value_;
  /**
   * required string value = 2;
   */
  public boolean hasValue() {
    return ((bitField0_ & 0x00000002) != 0);
  }
  /**
   * required string value = 2;
   */
  public java.lang.String getValue() {
    java.lang.Object ref = value_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      org.apache.hadoop.thirdparty.protobuf.ByteString bs =
          (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      if (bs.isValidUtf8()) {
        value_ = s;
      }
      return s;
    }
  }
  /**
   * required string value = 2;
   */
  public org.apache.hadoop.thirdparty.protobuf.ByteString
      getValueBytes() {
    java.lang.Object ref = value_;
    if (ref instanceof java.lang.String) {
      org.apache.hadoop.thirdparty.protobuf.ByteString b =
          org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
              (java.lang.String) ref);
      value_ = b;
      return b;
    } else {
      return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
    }
  }
  // Memoized initialization state: -1 unknown, 0 not initialized, 1 initialized.
  private byte memoizedIsInitialized = -1;
  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;
    // Both fields are proto2 "required": absence makes the message uninitialized.
    if (!hasKey()) {
      memoizedIsInitialized = 0;
      return false;
    }
    if (!hasValue()) {
      memoizedIsInitialized = 0;
      return false;
    }
    memoizedIsInitialized = 1;
    return true;
  }
  @java.lang.Override
  public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                      throws java.io.IOException {
    if (((bitField0_ & 0x00000001) != 0)) {
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, key_);
    }
    if (((bitField0_ & 0x00000002) != 0)) {
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, value_);
    }
    unknownFields.writeTo(output);
  }
  @java.lang.Override
  public int getSerializedSize() {
    // memoizedSize of -1 means "not computed yet".
    int size = memoizedSize;
    if (size != -1) return size;
    size = 0;
    if (((bitField0_ & 0x00000001) != 0)) {
      size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, key_);
    }
    if (((bitField0_ & 0x00000002) != 0)) {
      size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, value_);
    }
    size += unknownFields.getSerializedSize();
    memoizedSize = size;
    return size;
  }
  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
     return true;
    }
    if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto)) {
      return super.equals(obj);
    }
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto) obj;

    // Field-by-field comparison: presence must match, then values; unknown fields too.
    if (hasKey() != other.hasKey()) return false;
    if (hasKey()) {
      if (!getKey()
          .equals(other.getKey())) return false;
    }
    if (hasValue() != other.hasValue()) return false;
    if (hasValue()) {
      if (!getValue()
          .equals(other.getValue())) return false;
    }
    if (!unknownFields.equals(other.unknownFields)) return false;
    return true;
  }
  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    if (hasKey()) {
      hash = (37 * hash) + KEY_FIELD_NUMBER;
      hash = (53 * hash) + getKey().hashCode();
    }
    if (hasValue()) {
      hash = (37 * hash) + VALUE_FIELD_NUMBER;
      hash = (53 * hash) + getValue().hashCode();
    }
    hash = (29 * hash) + unknownFields.hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  // Standard protoc-generated parse entry points; all delegate to PARSER.
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
      java.nio.ByteBuffer data)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
      java.nio.ByteBuffer data,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
      org.apache.hadoop.thirdparty.protobuf.ByteString data)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
      org.apache.hadoop.thirdparty.protobuf.ByteString data,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(byte[] data)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
      byte[] data,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
      java.io.InputStream input,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input, extensionRegistry);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseDelimitedFrom(java.io.InputStream input)
      throws java.io.IOException {
    return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
        .parseDelimitedWithIOException(PARSER, input);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseDelimitedFrom(
      java.io.InputStream input,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
        .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
      org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
      throws java.io.IOException {
    return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
      org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input, extensionRegistry);
  }

  @java.lang.Override
  public Builder newBuilderForType() { return newBuilder(); }
  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }
  public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }
  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE
        ? new Builder() : new Builder().mergeFrom(this);
  }
  @java.lang.Override
  protected Builder newBuilderForType(
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   * 
   **
   * ECSchema options entry
   * 
   *
   * Protobuf type {@code hadoop.hdfs.ECSchemaOptionEntryProto}
   */
  // NOTE(review): protoc-generated builder — do not hand-modify logic. Generic
  // parameters (e.g. GeneratedMessageV3.Builder<Builder>) appear stripped by the
  // HTML extraction of this listing; confirm against the original generated file.
  public static final class Builder extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements
      // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ECSchemaOptionEntryProto)
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder {
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor;
    }
    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder.class);
    }

    // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }

    private Builder(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }
    private void maybeForceBuilderInitialization() {
      // No nested-message field builders to pre-create for this message.
      if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
              .alwaysUseFieldBuilders) {
      }
    }
    @java.lang.Override
    public Builder clear() {
      super.clear();
      // Reset both fields to defaults and drop their presence bits.
      key_ = "";
      bitField0_ = (bitField0_ & ~0x00000001);
      value_ = "";
      bitField0_ = (bitField0_ & ~0x00000002);
      return this;
    }
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptorForType() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor;
    }
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getDefaultInstanceForType() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance();
    }
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto build() {
      // Unlike buildPartial(), build() enforces that required fields are set.
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto buildPartial() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto(this);
      // Copy presence bits and field values from the builder into the message.
      int from_bitField0_ = bitField0_;
      int to_bitField0_ = 0;
      if (((from_bitField0_ & 0x00000001) != 0)) {
        to_bitField0_ |= 0x00000001;
      }
      result.key_ = key_;
      if (((from_bitField0_ & 0x00000002) != 0)) {
        to_bitField0_ |= 0x00000002;
      }
      result.value_ = value_;
      result.bitField0_ = to_bitField0_;
      onBuilt();
      return result;
    }
    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }
    @java.lang.Override
    public Builder setField(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.setField(field, value);
    }
    @java.lang.Override
    public Builder clearField(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }
    @java.lang.Override
    public Builder clearOneof(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }
    @java.lang.Override
    public Builder setRepeatedField(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
        int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }
    @java.lang.Override
    public Builder addRepeatedField(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }
    @java.lang.Override
    public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
      // Dispatch to the strongly-typed merge when possible.
      if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto) {
        return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto)other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto other) {
      if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance()) return this;
      // Only fields present in 'other' overwrite this builder's state.
      if (other.hasKey()) {
        bitField0_ |= 0x00000001;
        key_ = other.key_;
        onChanged();
      }
      if (other.hasValue()) {
        bitField0_ |= 0x00000002;
        value_ = other.value_;
        onChanged();
      }
      this.mergeUnknownFields(other.unknownFields);
      onChanged();
      return this;
    }
    @java.lang.Override
    public final boolean isInitialized() {
      if (!hasKey()) {
        return false;
      }
      if (!hasValue()) {
        return false;
      }
      return true;
    }
    @java.lang.Override
    public Builder mergeFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parsedMessage = null;
      try {
        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
        parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto) e.getUnfinishedMessage();
        throw e.unwrapIOException();
      } finally {
        // Merge whatever was successfully parsed, even when an exception is rethrown.
        if (parsedMessage != null) {
          mergeFrom(parsedMessage);
        }
      }
      return this;
    }
    // Presence bits: 0x1 = key, 0x2 = value (mirrors the message class).
    private int bitField0_;

    private java.lang.Object key_ = "";
    /**
     * required string key = 1;
     */
    public boolean hasKey() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * required string key = 1;
     */
    public java.lang.String getKey() {
      java.lang.Object ref = key_;
      if (!(ref instanceof java.lang.String)) {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs =
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          // Cache the decoded form only when the bytes are valid UTF-8.
          key_ = s;
        }
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     * required string key = 1;
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getKeyBytes() {
      java.lang.Object ref = key_;
      if (ref instanceof String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b =
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        key_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }
    /**
     * required string key = 1;
     */
    public Builder setKey(
        java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bitField0_ |= 0x00000001;
      key_ = value;
      onChanged();
      return this;
    }
    /**
     * required string key = 1;
     */
    public Builder clearKey() {
      bitField0_ = (bitField0_ & ~0x00000001);
      key_ = getDefaultInstance().getKey();
      onChanged();
      return this;
    }
    /**
     * required string key = 1;
     */
    public Builder setKeyBytes(
        org.apache.hadoop.thirdparty.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bitField0_ |= 0x00000001;
      key_ = value;
      onChanged();
      return this;
    }

    private java.lang.Object value_ = "";
    /**
     * required string value = 2;
     */
    public boolean hasValue() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * required string value = 2;
     */
    public java.lang.String getValue() {
      java.lang.Object ref = value_;
      if (!(ref instanceof java.lang.String)) {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs =
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          value_ = s;
        }
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     * required string value = 2;
     */
    public
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getValueBytes() {
      java.lang.Object ref = value_;
      if (ref instanceof String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b =
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        value_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }
    /**
     * required string value = 2;
     */
    public Builder setValue(
        java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bitField0_ |= 0x00000002;
      value_ = value;
      onChanged();
      return this;
    }
    /**
     * required string value = 2;
     */
    public Builder clearValue() {
      bitField0_ = (bitField0_ & ~0x00000002);
      value_ = getDefaultInstance().getValue();
      onChanged();
      return this;
    }
    /**
     * required string value = 2;
     */
    public Builder setValueBytes(
        org.apache.hadoop.thirdparty.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bitField0_ |= 0x00000002;
      value_ = value;
      onChanged();
      return this;
    }
    @java.lang.Override
    public final Builder setUnknownFields(
        final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }
    @java.lang.Override
    public final Builder mergeUnknownFields(
        final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ECSchemaOptionEntryProto)
  }

  // @@protoc_insertion_point(class_scope:hadoop.hdfs.ECSchemaOptionEntryProto)
  private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto DEFAULT_INSTANCE;
  static {
    DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto();
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }
  // Deprecated as a public field in newer protobuf runtimes; use parser() instead.
  @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER =
      new org.apache.hadoop.thirdparty.protobuf.AbstractParser() {
    @java.lang.Override
    public ECSchemaOptionEntryProto parsePartialFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      // Delegates to the message's parsing constructor.
      return new ECSchemaOptionEntryProto(input, extensionRegistry);
    }
  };
  public static org.apache.hadoop.thirdparty.protobuf.Parser parser() {
    return PARSER;
  }
  @java.lang.Override
  public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() {
    return PARSER;
  }
  @java.lang.Override
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }

}

// Accessor contract for ECSchemaProto (read side shared by message and builder).
// NOTE(review): the raw java.util.List return types below look like generic
// parameters (List<ECSchemaOptionEntryProto>) stripped by the HTML extraction
// of this listing — confirm against the original generated file.
public interface ECSchemaProtoOrBuilder extends
    // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ECSchemaProto)
    org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

  /**
   * required string codecName = 1;
   */
  boolean hasCodecName();
  /**
   * required string codecName = 1;
   */
  java.lang.String getCodecName();
  /**
   * required string codecName = 1;
   */
  org.apache.hadoop.thirdparty.protobuf.ByteString
      getCodecNameBytes();

  /**
   * required uint32 dataUnits = 2;
   */
  boolean hasDataUnits();
  /**
   * required uint32 dataUnits = 2;
   */
  int getDataUnits();

  /**
   * required uint32 parityUnits = 3;
   */
  boolean hasParityUnits();
  /**
   * required uint32 parityUnits = 3;
   */
  int getParityUnits();

  /**
   * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
   */
  java.util.List
      getOptionsList();
  /**
   * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getOptions(int index);
  /**
   * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
   */
  int getOptionsCount();
  /**
   * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
   */
  java.util.List
      getOptionsOrBuilderList();
  /**
   * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder getOptionsOrBuilder(
      int index);
}

/**
 * 
   **
   * ECSchema for erasure coding
   * 
* * Protobuf type {@code hadoop.hdfs.ECSchemaProto} */ public static final class ECSchemaProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ECSchemaProto) ECSchemaProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ECSchemaProto.newBuilder() to construct. private ECSchemaProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ECSchemaProto() { codecName_ = ""; options_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ECSchemaProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; codecName_ = bs; break; } case 16: { bitField0_ |= 0x00000002; dataUnits_ = input.readUInt32(); break; } case 24: { bitField0_ |= 0x00000004; parityUnits_ = input.readUInt32(); break; } case 34: { if (!((mutable_bitField0_ & 0x00000008) != 0)) { options_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000008; } options_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.PARSER, extensionRegistry)); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; 
} break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000008) != 0)) { options_ = java.util.Collections.unmodifiableList(options_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder.class); } private int bitField0_; public static final int CODECNAME_FIELD_NUMBER = 1; private volatile java.lang.Object codecName_; /** * required string codecName = 1; */ public boolean hasCodecName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string codecName = 1; */ public java.lang.String getCodecName() { java.lang.Object ref = codecName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { codecName_ = s; } return s; } } /** * required string codecName = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getCodecNameBytes() { java.lang.Object ref = codecName_; if (ref instanceof java.lang.String) { 
org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); codecName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int DATAUNITS_FIELD_NUMBER = 2; private int dataUnits_; /** * required uint32 dataUnits = 2; */ public boolean hasDataUnits() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 dataUnits = 2; */ public int getDataUnits() { return dataUnits_; } public static final int PARITYUNITS_FIELD_NUMBER = 3; private int parityUnits_; /** * required uint32 parityUnits = 3; */ public boolean hasParityUnits() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint32 parityUnits = 3; */ public int getParityUnits() { return parityUnits_; } public static final int OPTIONS_FIELD_NUMBER = 4; private java.util.List options_; /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public java.util.List getOptionsList() { return options_; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public java.util.List getOptionsOrBuilderList() { return options_; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public int getOptionsCount() { return options_.size(); } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getOptions(int index) { return options_.get(index); } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder getOptionsOrBuilder( int index) { return options_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasCodecName()) { memoizedIsInitialized = 0; return 
false; } if (!hasDataUnits()) { memoizedIsInitialized = 0; return false; } if (!hasParityUnits()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getOptionsCount(); i++) { if (!getOptions(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, codecName_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, dataUnits_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt32(3, parityUnits_); } for (int i = 0; i < options_.size(); i++) { output.writeMessage(4, options_.get(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, codecName_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, dataUnits_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(3, parityUnits_); } for (int i = 0; i < options_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, options_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto) obj; if 
(hasCodecName() != other.hasCodecName()) return false; if (hasCodecName()) { if (!getCodecName() .equals(other.getCodecName())) return false; } if (hasDataUnits() != other.hasDataUnits()) return false; if (hasDataUnits()) { if (getDataUnits() != other.getDataUnits()) return false; } if (hasParityUnits() != other.hasParityUnits()) return false; if (hasParityUnits()) { if (getParityUnits() != other.getParityUnits()) return false; } if (!getOptionsList() .equals(other.getOptionsList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasCodecName()) { hash = (37 * hash) + CODECNAME_FIELD_NUMBER; hash = (53 * hash) + getCodecName().hashCode(); } if (hasDataUnits()) { hash = (37 * hash) + DATAUNITS_FIELD_NUMBER; hash = (53 * hash) + getDataUnits(); } if (hasParityUnits()) { hash = (37 * hash) + PARITYUNITS_FIELD_NUMBER; hash = (53 * hash) + getParityUnits(); } if (getOptionsCount() > 0) { hash = (37 * hash) + OPTIONS_FIELD_NUMBER; hash = (53 * hash) + getOptionsList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseDelimitedFrom( java.io.InputStream input, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * ECSchema for erasure coding
     * 
* * Protobuf type {@code hadoop.hdfs.ECSchemaProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ECSchemaProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getOptionsFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); codecName_ = ""; bitField0_ = (bitField0_ & ~0x00000001); dataUnits_ = 0; bitField0_ = (bitField0_ & ~0x00000002); parityUnits_ = 0; bitField0_ = (bitField0_ & ~0x00000004); if (optionsBuilder_ == null) { options_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); } else { optionsBuilder_.clear(); } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.codecName_ = codecName_; if (((from_bitField0_ & 0x00000002) != 0)) { result.dataUnits_ = dataUnits_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.parityUnits_ = parityUnits_; to_bitField0_ |= 0x00000004; } if (optionsBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0)) { options_ = java.util.Collections.unmodifiableList(options_); bitField0_ = (bitField0_ & ~0x00000008); } result.options_ = options_; } else { result.options_ = optionsBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public 
Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance()) return this; if (other.hasCodecName()) { bitField0_ |= 0x00000001; codecName_ = other.codecName_; onChanged(); } if (other.hasDataUnits()) { setDataUnits(other.getDataUnits()); } if (other.hasParityUnits()) { setParityUnits(other.getParityUnits()); } if (optionsBuilder_ == null) { if (!other.options_.isEmpty()) { if (options_.isEmpty()) { options_ = other.options_; bitField0_ = (bitField0_ & ~0x00000008); } else { ensureOptionsIsMutable(); options_.addAll(other.options_); } onChanged(); } } else { if (!other.options_.isEmpty()) { if (optionsBuilder_.isEmpty()) { optionsBuilder_.dispose(); optionsBuilder_ = null; options_ = other.options_; bitField0_ = (bitField0_ & ~0x00000008); optionsBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getOptionsFieldBuilder() : null; } else { optionsBuilder_.addAllMessages(other.options_); } } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasCodecName()) { return false; } if (!hasDataUnits()) { return false; } if (!hasParityUnits()) { return false; } for (int i = 0; i < getOptionsCount(); i++) { if (!getOptions(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object codecName_ = ""; /** * required string codecName = 1; */ public boolean hasCodecName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string codecName = 1; */ public java.lang.String getCodecName() { java.lang.Object ref = codecName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { codecName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string codecName = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getCodecNameBytes() { java.lang.Object ref = codecName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = 
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); codecName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string codecName = 1; */ public Builder setCodecName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; codecName_ = value; onChanged(); return this; } /** * required string codecName = 1; */ public Builder clearCodecName() { bitField0_ = (bitField0_ & ~0x00000001); codecName_ = getDefaultInstance().getCodecName(); onChanged(); return this; } /** * required string codecName = 1; */ public Builder setCodecNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; codecName_ = value; onChanged(); return this; } private int dataUnits_ ; /** * required uint32 dataUnits = 2; */ public boolean hasDataUnits() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 dataUnits = 2; */ public int getDataUnits() { return dataUnits_; } /** * required uint32 dataUnits = 2; */ public Builder setDataUnits(int value) { bitField0_ |= 0x00000002; dataUnits_ = value; onChanged(); return this; } /** * required uint32 dataUnits = 2; */ public Builder clearDataUnits() { bitField0_ = (bitField0_ & ~0x00000002); dataUnits_ = 0; onChanged(); return this; } private int parityUnits_ ; /** * required uint32 parityUnits = 3; */ public boolean hasParityUnits() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint32 parityUnits = 3; */ public int getParityUnits() { return parityUnits_; } /** * required uint32 parityUnits = 3; */ public Builder setParityUnits(int value) { bitField0_ |= 0x00000004; parityUnits_ = value; onChanged(); return this; } /** * required uint32 parityUnits = 3; */ public Builder clearParityUnits() { bitField0_ = (bitField0_ & ~0x00000004); parityUnits_ = 0; onChanged(); return this; } private 
java.util.List options_ = java.util.Collections.emptyList(); private void ensureOptionsIsMutable() { if (!((bitField0_ & 0x00000008) != 0)) { options_ = new java.util.ArrayList(options_); bitField0_ |= 0x00000008; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder> optionsBuilder_; /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public java.util.List getOptionsList() { if (optionsBuilder_ == null) { return java.util.Collections.unmodifiableList(options_); } else { return optionsBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public int getOptionsCount() { if (optionsBuilder_ == null) { return options_.size(); } else { return optionsBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getOptions(int index) { if (optionsBuilder_ == null) { return options_.get(index); } else { return optionsBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder setOptions( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto value) { if (optionsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureOptionsIsMutable(); options_.set(index, value); onChanged(); } else { optionsBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder setOptions( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder builderForValue) { if (optionsBuilder_ == null) { ensureOptionsIsMutable(); options_.set(index, 
builderForValue.build()); onChanged(); } else { optionsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder addOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto value) { if (optionsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureOptionsIsMutable(); options_.add(value); onChanged(); } else { optionsBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder addOptions( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto value) { if (optionsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureOptionsIsMutable(); options_.add(index, value); onChanged(); } else { optionsBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder addOptions( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder builderForValue) { if (optionsBuilder_ == null) { ensureOptionsIsMutable(); options_.add(builderForValue.build()); onChanged(); } else { optionsBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder addOptions( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder builderForValue) { if (optionsBuilder_ == null) { ensureOptionsIsMutable(); options_.add(index, builderForValue.build()); onChanged(); } else { optionsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder addAllOptions( java.lang.Iterable values) { if (optionsBuilder_ == null) { ensureOptionsIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, options_); 
onChanged(); } else { optionsBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder clearOptions() { if (optionsBuilder_ == null) { options_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); onChanged(); } else { optionsBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public Builder removeOptions(int index) { if (optionsBuilder_ == null) { ensureOptionsIsMutable(); options_.remove(index); onChanged(); } else { optionsBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder getOptionsBuilder( int index) { return getOptionsFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder getOptionsOrBuilder( int index) { if (optionsBuilder_ == null) { return options_.get(index); } else { return optionsBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public java.util.List getOptionsOrBuilderList() { if (optionsBuilder_ != null) { return optionsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(options_); } } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder addOptionsBuilder() { return getOptionsFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder addOptionsBuilder( int index) { return 
getOptionsFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4; */ public java.util.List getOptionsBuilderList() { return getOptionsFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder> getOptionsFieldBuilder() { if (optionsBuilder_ == null) { optionsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder>( options_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); options_ = null; } return optionsBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ECSchemaProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ECSchemaProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated 
public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ECSchemaProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new ECSchemaProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ErasureCodingPolicyProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ErasureCodingPolicyProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional string name = 1; */ boolean hasName(); /** * optional string name = 1; */ java.lang.String getName(); /** * optional string name = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes(); /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ boolean hasSchema(); /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getSchema(); /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder getSchemaOrBuilder(); /** * optional uint32 cellSize = 3; */ boolean hasCellSize(); /** * optional uint32 cellSize = 3; */ int getCellSize(); /** *
     * Actually a byte - only 8 bits used
     * 
* * required uint32 id = 4; */ boolean hasId(); /** *
     * Actually a byte - only 8 bits used
     * 
* * required uint32 id = 4; */ int getId(); /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; */ boolean hasState(); /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState getState(); } /** * Protobuf type {@code hadoop.hdfs.ErasureCodingPolicyProto} */ public static final class ErasureCodingPolicyProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.ErasureCodingPolicyProto) ErasureCodingPolicyProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ErasureCodingPolicyProto.newBuilder() to construct. private ErasureCodingPolicyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ErasureCodingPolicyProto() { name_ = ""; state_ = 2; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ErasureCodingPolicyProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; name_ = bs; break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) != 0)) { subBuilder = 
schema_.toBuilder(); } schema_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(schema_); schema_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } case 24: { bitField0_ |= 0x00000004; cellSize_ = input.readUInt32(); break; } case 32: { bitField0_ |= 0x00000008; id_ = input.readUInt32(); break; } case 40: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(5, rawValue); } else { bitField0_ |= 0x00000010; state_ = rawValue; } break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder.class); } private int bitField0_; public 
static final int NAME_FIELD_NUMBER = 1; private volatile java.lang.Object name_; /** * optional string name = 1; */ public boolean hasName() { return ((bitField0_ & 0x00000001) != 0); } /** * optional string name = 1; */ public java.lang.String getName() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } } /** * optional string name = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int SCHEMA_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto schema_; /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public boolean hasSchema() { return ((bitField0_ & 0x00000002) != 0); } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getSchema() { return schema_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance() : schema_; } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder getSchemaOrBuilder() { return schema_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance() : schema_; } public static final int CELLSIZE_FIELD_NUMBER = 3; private int cellSize_; /** * optional uint32 cellSize = 3; */ public boolean hasCellSize() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint32 cellSize = 3; */ public int getCellSize() { return cellSize_; } public static final int ID_FIELD_NUMBER = 4; private int id_; /** *
     * Actually a byte - only 8 bits used
     * 
* * required uint32 id = 4; */ public boolean hasId() { return ((bitField0_ & 0x00000008) != 0); } /** *
     * Actually a byte - only 8 bits used
     * 
* * required uint32 id = 4; */ public int getId() { return id_; } public static final int STATE_FIELD_NUMBER = 5; private int state_; /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; */ public boolean hasState() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState getState() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.valueOf(state_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.ENABLED : result; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasId()) { memoizedIsInitialized = 0; return false; } if (hasSchema()) { if (!getSchema().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, name_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(2, getSchema()); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt32(3, cellSize_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt32(4, id_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeEnum(5, state_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, name_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(2, getSchema()); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(3, cellSize_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(4, id_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(5, state_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto) obj; if (hasName() != other.hasName()) return false; if (hasName()) { if (!getName() .equals(other.getName())) return false; } if (hasSchema() != other.hasSchema()) return false; if (hasSchema()) { if (!getSchema() .equals(other.getSchema())) return false; } if (hasCellSize() != other.hasCellSize()) return false; if (hasCellSize()) { if (getCellSize() != other.getCellSize()) return false; } if (hasId() != other.hasId()) return false; if (hasId()) { if (getId() != other.getId()) return false; } if (hasState() != other.hasState()) return false; if (hasState()) { if (state_ != other.state_) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasName()) { hash = (37 * hash) + 
NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } if (hasSchema()) { hash = (37 * hash) + SCHEMA_FIELD_NUMBER; hash = (53 * hash) + getSchema().hashCode(); } if (hasCellSize()) { hash = (37 * hash) + CELLSIZE_FIELD_NUMBER; hash = (53 * hash) + getCellSize(); } if (hasId()) { hash = (37 * hash) + ID_FIELD_NUMBER; hash = (53 * hash) + getId(); } if (hasState()) { hash = (37 * hash) + STATE_FIELD_NUMBER; hash = (53 * hash) + state_; } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ErasureCodingPolicyProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ErasureCodingPolicyProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.class, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getSchemaFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); if (schemaBuilder_ == null) { schema_ = null; } else { schemaBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); cellSize_ = 0; bitField0_ = (bitField0_ & ~0x00000004); id_ = 0; bitField0_ = (bitField0_ & ~0x00000008); state_ = 2; bitField0_ = (bitField0_ & ~0x00000010); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto result = new 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.name_ = name_; if (((from_bitField0_ & 0x00000002) != 0)) { if (schemaBuilder_ == null) { result.schema_ = schema_; } else { result.schema_ = schemaBuilder_.build(); } to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.cellSize_ = cellSize_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.id_ = id_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { to_bitField0_ |= 0x00000010; } result.state_ = state_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto) { return 
mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) return this; if (other.hasName()) { bitField0_ |= 0x00000001; name_ = other.name_; onChanged(); } if (other.hasSchema()) { mergeSchema(other.getSchema()); } if (other.hasCellSize()) { setCellSize(other.getCellSize()); } if (other.hasId()) { setId(other.getId()); } if (other.hasState()) { setState(other.getState()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasId()) { return false; } if (hasSchema()) { if (!getSchema().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object name_ = ""; /** * optional string name = 1; */ public boolean hasName() { return ((bitField0_ & 0x00000001) != 0); } /** * optional string name = 1; */ public java.lang.String getName() { java.lang.Object ref = name_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string name = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string name = 1; */ public Builder setName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; name_ = value; onChanged(); return this; } /** * optional string name = 1; */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000001); name_ = getDefaultInstance().getName(); onChanged(); return this; } /** * optional string name = 1; */ public Builder setNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; name_ = value; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto schema_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder> schemaBuilder_; /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public boolean hasSchema() { return ((bitField0_ & 0x00000002) != 0); } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getSchema() { if (schemaBuilder_ == null) { return schema_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance() : schema_; } else { return schemaBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public Builder setSchema(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto value) { if (schemaBuilder_ == null) { if (value == null) { throw new NullPointerException(); } schema_ = value; onChanged(); } else { schemaBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public Builder setSchema( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder builderForValue) { if (schemaBuilder_ == null) { schema_ = builderForValue.build(); onChanged(); } else { schemaBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public Builder mergeSchema(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto value) { if (schemaBuilder_ == null) { if (((bitField0_ & 0x00000002) != 0) && schema_ != null && schema_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance()) { schema_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.newBuilder(schema_).mergeFrom(value).buildPartial(); } else { schema_ = value; } onChanged(); } else { schemaBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public Builder clearSchema() { if (schemaBuilder_ == null) { schema_ = null; onChanged(); } else { schemaBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder getSchemaBuilder() { bitField0_ |= 0x00000002; onChanged(); return getSchemaFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder getSchemaOrBuilder() { if (schemaBuilder_ != null) { return schemaBuilder_.getMessageOrBuilder(); } else { return schema_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance() : schema_; } } /** * optional .hadoop.hdfs.ECSchemaProto schema = 2; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder> getSchemaFieldBuilder() { if (schemaBuilder_ == null) { schemaBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder>( getSchema(), getParentForChildren(), isClean()); schema_ = null; } return schemaBuilder_; } private int cellSize_ ; /** * optional uint32 cellSize = 3; */ public boolean hasCellSize() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint32 cellSize = 3; */ public int getCellSize() { return cellSize_; } /** * optional uint32 cellSize = 3; */ public Builder setCellSize(int value) { bitField0_ |= 0x00000004; cellSize_ = value; onChanged(); return this; } /** * optional uint32 cellSize = 3; */ public Builder clearCellSize() { bitField0_ = (bitField0_ & ~0x00000004); cellSize_ = 0; onChanged(); return this; } private int id_ ; /** *
       * Actually a byte - only 8 bits used
       * 
* * required uint32 id = 4; */ public boolean hasId() { return ((bitField0_ & 0x00000008) != 0); } /** *
       * Actually a byte - only 8 bits used
       * 
* * required uint32 id = 4; */ public int getId() { return id_; } /** *
       * Actually a byte - only 8 bits used
       * 
* * required uint32 id = 4; */ public Builder setId(int value) { bitField0_ |= 0x00000008; id_ = value; onChanged(); return this; } /** *
       * Actually a byte - only 8 bits used
       * 
* * required uint32 id = 4; */ public Builder clearId() { bitField0_ = (bitField0_ & ~0x00000008); id_ = 0; onChanged(); return this; } private int state_ = 2; /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; */ public boolean hasState() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState getState() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.valueOf(state_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.ENABLED : result; } /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; */ public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; state_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED]; */ public Builder clearState() { bitField0_ = (bitField0_ & ~0x00000010); state_ = 2; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ErasureCodingPolicyProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ErasureCodingPolicyProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto DEFAULT_INSTANCE; 
static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ErasureCodingPolicyProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new ErasureCodingPolicyProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface AddErasureCodingPolicyResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.AddErasureCodingPolicyResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ boolean hasPolicy(); /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicy(); /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPolicyOrBuilder(); /** * required bool succeed = 2; */ boolean hasSucceed(); /** * required bool succeed = 2; */ boolean getSucceed(); /** * optional string errorMsg = 3; */ boolean hasErrorMsg(); /** * optional string errorMsg = 3; */ java.lang.String 
getErrorMsg(); /** * optional string errorMsg = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getErrorMsgBytes(); } /** * Protobuf type {@code hadoop.hdfs.AddErasureCodingPolicyResponseProto} */ public static final class AddErasureCodingPolicyResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.AddErasureCodingPolicyResponseProto) AddErasureCodingPolicyResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use AddErasureCodingPolicyResponseProto.newBuilder() to construct. private AddErasureCodingPolicyResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private AddErasureCodingPolicyResponseProto() { errorMsg_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AddErasureCodingPolicyResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) != 0)) { subBuilder = policy_.toBuilder(); } policy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(policy_); policy_ = subBuilder.buildPartial(); } 
bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; succeed_ = input.readBool(); break; } case 26: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000004; errorMsg_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.Builder.class); } private int bitField0_; public static final int POLICY_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto policy_; /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public boolean hasPolicy() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicy() { return policy_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : policy_; } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPolicyOrBuilder() { return policy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : policy_; } public static final int SUCCEED_FIELD_NUMBER = 2; private boolean succeed_; /** * required bool succeed = 2; */ public boolean hasSucceed() { return ((bitField0_ & 0x00000002) != 0); } /** * required bool succeed = 2; */ public boolean getSucceed() { return succeed_; } public static final int ERRORMSG_FIELD_NUMBER = 3; private volatile java.lang.Object errorMsg_; /** * optional string errorMsg = 3; */ public boolean hasErrorMsg() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string errorMsg = 3; */ public java.lang.String getErrorMsg() { java.lang.Object ref = errorMsg_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { errorMsg_ = s; } return s; } } /** * optional string errorMsg = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getErrorMsgBytes() { java.lang.Object ref = errorMsg_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); errorMsg_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasPolicy()) { memoizedIsInitialized = 0; 
return false; } if (!hasSucceed()) { memoizedIsInitialized = 0; return false; } if (!getPolicy().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getPolicy()); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBool(2, succeed_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, errorMsg_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getPolicy()); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(2, succeed_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, errorMsg_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto) obj; if (hasPolicy() != other.hasPolicy()) return false; if (hasPolicy()) { if (!getPolicy() .equals(other.getPolicy())) return false; } if (hasSucceed() != other.hasSucceed()) return false; if (hasSucceed()) { if (getSucceed() != other.getSucceed()) return false; } if (hasErrorMsg() != 
other.hasErrorMsg()) return false; if (hasErrorMsg()) { if (!getErrorMsg() .equals(other.getErrorMsg())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasPolicy()) { hash = (37 * hash) + POLICY_FIELD_NUMBER; hash = (53 * hash) + getPolicy().hashCode(); } if (hasSucceed()) { hash = (37 * hash) + SUCCEED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getSucceed()); } if (hasErrorMsg()) { hash = (37 * hash) + ERRORMSG_FIELD_NUMBER; hash = (53 * hash) + getErrorMsg().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } 
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AddErasureCodingPolicyResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.AddErasureCodingPolicyResponseProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getPolicyFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (policyBuilder_ == null) { policy_ = null; } else { 
policyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); succeed_ = false; bitField0_ = (bitField0_ & ~0x00000002); errorMsg_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { if (policyBuilder_ == null) { result.policy_ = policy_; } else { result.policy_ = policyBuilder_.build(); } to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.succeed_ = succeed_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.errorMsg_ = errorMsg_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.getDefaultInstance()) return this; if (other.hasPolicy()) { mergePolicy(other.getPolicy()); } if (other.hasSucceed()) { setSucceed(other.getSucceed()); } if (other.hasErrorMsg()) { bitField0_ |= 0x00000004; errorMsg_ = other.errorMsg_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasPolicy()) { return false; } if (!hasSucceed()) { return false; } if (!getPolicy().isInitialized()) { return false; } return true; } @java.lang.Override 
public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto policy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> policyBuilder_; /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public boolean hasPolicy() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicy() { if (policyBuilder_ == null) { return policy_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : policy_; } else { return policyBuilder_.getMessage(); } } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public Builder setPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (policyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } policy_ = value; onChanged(); } else { policyBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public Builder setPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) { if (policyBuilder_ == null) { policy_ = builderForValue.build(); onChanged(); } else { policyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public Builder mergePolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (policyBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && policy_ != null && policy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) { policy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.newBuilder(policy_).mergeFrom(value).buildPartial(); } else { policy_ = value; } onChanged(); } else { policyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public Builder clearPolicy() { if (policyBuilder_ == null) { policy_ = null; onChanged(); } else { policyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getPolicyBuilder() { bitField0_ |= 
0x00000001; onChanged(); return getPolicyFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPolicyOrBuilder() { if (policyBuilder_ != null) { return policyBuilder_.getMessageOrBuilder(); } else { return policy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : policy_; } } /** * required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> getPolicyFieldBuilder() { if (policyBuilder_ == null) { policyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>( getPolicy(), getParentForChildren(), isClean()); policy_ = null; } return policyBuilder_; } private boolean succeed_ ; /** * required bool succeed = 2; */ public boolean hasSucceed() { return ((bitField0_ & 0x00000002) != 0); } /** * required bool succeed = 2; */ public boolean getSucceed() { return succeed_; } /** * required bool succeed = 2; */ public Builder setSucceed(boolean value) { bitField0_ |= 0x00000002; succeed_ = value; onChanged(); return this; } /** * required bool succeed = 2; */ public Builder clearSucceed() { bitField0_ = (bitField0_ & ~0x00000002); succeed_ = false; onChanged(); return this; } private java.lang.Object errorMsg_ = ""; /** * optional string errorMsg = 3; */ public boolean hasErrorMsg() { return ((bitField0_ & 0x00000004) != 
0); } /** * optional string errorMsg = 3; */ public java.lang.String getErrorMsg() { java.lang.Object ref = errorMsg_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { errorMsg_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string errorMsg = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getErrorMsgBytes() { java.lang.Object ref = errorMsg_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); errorMsg_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string errorMsg = 3; */ public Builder setErrorMsg( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; errorMsg_ = value; onChanged(); return this; } /** * optional string errorMsg = 3; */ public Builder clearErrorMsg() { bitField0_ = (bitField0_ & ~0x00000004); errorMsg_ = getDefaultInstance().getErrorMsg(); onChanged(); return this; } /** * optional string errorMsg = 3; */ public Builder setErrorMsgBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; errorMsg_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AddErasureCodingPolicyResponseProto) } // 
@@protoc_insertion_point(class_scope:hadoop.hdfs.AddErasureCodingPolicyResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public AddErasureCodingPolicyResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new AddErasureCodingPolicyResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ECTopologyVerifierResultProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ECTopologyVerifierResultProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string resultMessage = 1; */ boolean hasResultMessage(); /** * required string resultMessage = 1; */ java.lang.String getResultMessage(); /** * required string resultMessage = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getResultMessageBytes(); /** * required bool isSupported = 2; */ boolean hasIsSupported(); /** * required bool isSupported = 2; */ boolean getIsSupported(); } 
/**
 * Protobuf type {@code hadoop.hdfs.ECTopologyVerifierResultProto}
 *
 * <p>Result of an erasure-coding topology verification: a human-readable
 * {@code resultMessage} plus a boolean {@code isSupported} flag (both required).
 *
 * <p>NOTE(review): this class is emitted by protoc ("DO NOT EDIT"); the comments
 * below are explanatory only. Behavioral changes must be made in hdfs.proto and
 * the file regenerated.
 */
public static final class ECTopologyVerifierResultProto extends
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
    // @@protoc_insertion_point(message_implements:hadoop.hdfs.ECTopologyVerifierResultProto)
    ECTopologyVerifierResultProtoOrBuilder {
  private static final long serialVersionUID = 0L;
  // Use ECTopologyVerifierResultProto.newBuilder() to construct.
  private ECTopologyVerifierResultProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) {
    super(builder);
  }
  // No-arg constructor used for the default instance; the required string
  // field defaults to the empty string.
  private ECTopologyVerifierResultProto() {
    resultMessage_ = "";
  }
  @java.lang.Override
  public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
  getUnknownFields() {
    return this.unknownFields;
  }
  // Wire-format parsing constructor: consumes tag/value pairs until end of
  // input (tag 0). Tag 10 = field 1 (resultMessage, length-delimited);
  // tag 16 = field 2 (isSupported, varint bool). Unknown fields are preserved.
  private ECTopologyVerifierResultProto(
      org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    this();
    if (extensionRegistry == null) {
      throw new java.lang.NullPointerException();
    }
    int mutable_bitField0_ = 0;
    org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
        org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
          case 10: {
            org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
            bitField0_ |= 0x00000001;
            // Stored lazily as a ByteString; decoded to String on first access.
            resultMessage_ = bs;
            break;
          }
          case 16: {
            bitField0_ |= 0x00000002;
            isSupported_ = input.readBool();
            break;
          }
          default: {
            // Unrecognized fields are kept in unknownFields rather than dropped.
            if (!parseUnknownField(
                input, unknownFields, extensionRegistry, tag)) {
              done = true;
            }
            break;
          }
        }
      }
    } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(this);
    } catch (java.io.IOException e) {
      throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
          e).setUnfinishedMessage(this);
    } finally {
      // Always freeze whatever was parsed, even on error, so the partially
      // built message attached to the exception is consistent.
      this.unknownFields = unknownFields.build();
      makeExtensionsImmutable();
    }
  }
  public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor;
  }
  @java.lang.Override
  protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.Builder.class);
  }
  // Presence bits: 0x1 = resultMessage, 0x2 = isSupported.
  private int bitField0_;
  public static final int RESULTMESSAGE_FIELD_NUMBER = 1;
  // Holds either a String or a ByteString (lazy UTF-8 decode); volatile for
  // safe publication of the cached decoded form.
  private volatile java.lang.Object resultMessage_;
  /**
   * required string resultMessage = 1;
   */
  public boolean hasResultMessage() {
    return ((bitField0_ & 0x00000001) != 0);
  }
  /**
   * required string resultMessage = 1;
   */
  public java.lang.String getResultMessage() {
    java.lang.Object ref = resultMessage_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      org.apache.hadoop.thirdparty.protobuf.ByteString bs =
          (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      if (bs.isValidUtf8()) {
        // Cache the decoded form only when the bytes are valid UTF-8.
        resultMessage_ = s;
      }
      return s;
    }
  }
  /**
   * required string resultMessage = 1;
   */
  public org.apache.hadoop.thirdparty.protobuf.ByteString
      getResultMessageBytes() {
    java.lang.Object ref = resultMessage_;
    if (ref instanceof java.lang.String) {
      org.apache.hadoop.thirdparty.protobuf.ByteString b =
          org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
              (java.lang.String) ref);
      resultMessage_ = b;
      return b;
    } else {
      return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
    }
  }
  public static final int ISSUPPORTED_FIELD_NUMBER = 2;
  private boolean isSupported_;
  /**
   * required bool isSupported = 2;
   */
  public boolean hasIsSupported() {
    return ((bitField0_ & 0x00000002) != 0);
  }
  /**
   * required bool isSupported = 2;
   */
  public boolean getIsSupported() {
    return isSupported_;
  }
  // Memoized tri-state: -1 unknown, 0 not initialized, 1 initialized.
  private byte memoizedIsInitialized = -1;
  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;
    // Both fields are 'required' in the proto definition.
    if (!hasResultMessage()) {
      memoizedIsInitialized = 0;
      return false;
    }
    if (!hasIsSupported()) {
      memoizedIsInitialized = 0;
      return false;
    }
    memoizedIsInitialized = 1;
    return true;
  }
  @java.lang.Override
  public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                      throws java.io.IOException {
    if (((bitField0_ & 0x00000001) != 0)) {
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, resultMessage_);
    }
    if (((bitField0_ & 0x00000002) != 0)) {
      output.writeBool(2, isSupported_);
    }
    unknownFields.writeTo(output);
  }
  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;
    size = 0;
    if (((bitField0_ & 0x00000001) != 0)) {
      size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, resultMessage_);
    }
    if (((bitField0_ & 0x00000002) != 0)) {
      size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
        .computeBoolSize(2, isSupported_);
    }
    size += unknownFields.getSerializedSize();
    memoizedSize = size;
    return size;
  }
  // Field-presence-aware equality: two messages are equal only if each field's
  // presence bit and (when present) value match, plus their unknown fields.
  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
     return true;
    }
    if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto)) {
      return super.equals(obj);
    }
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto) obj;

    if (hasResultMessage() != other.hasResultMessage()) return false;
    if (hasResultMessage()) {
      if (!getResultMessage()
          .equals(other.getResultMessage())) return false;
    }
    if (hasIsSupported() != other.hasIsSupported()) return false;
    if (hasIsSupported()) {
      if (getIsSupported()
          != other.getIsSupported()) return false;
    }
    if (!unknownFields.equals(other.unknownFields)) return false;
    return true;
  }
  // Hash is memoized (0 means "not yet computed") and mixes each present
  // field's number and value, consistent with equals().
  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    if (hasResultMessage()) {
      hash = (37 * hash) + RESULTMESSAGE_FIELD_NUMBER;
      hash = (53 * hash) + getResultMessage().hashCode();
    }
    if (hasIsSupported()) {
      hash = (37 * hash) + ISSUPPORTED_FIELD_NUMBER;
      hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
          getIsSupported());
    }
    hash = (29 * hash) + unknownFields.hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  // ---- Standard generated parse entry points; all delegate to PARSER. ----
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
      java.nio.ByteBuffer data)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
      java.nio.ByteBuffer data,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
      org.apache.hadoop.thirdparty.protobuf.ByteString data)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
      org.apache.hadoop.thirdparty.protobuf.ByteString data,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(byte[] data)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
      byte[] data,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
      java.io.InputStream input,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input, extensionRegistry);
  }
  // Delimited variants read a leading varint length prefix before the message.
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseDelimitedFrom(java.io.InputStream input)
      throws java.io.IOException {
    return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
        .parseDelimitedWithIOException(PARSER, input);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseDelimitedFrom(
      java.io.InputStream input,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
        .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
      org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
      throws java.io.IOException {
    return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input);
  }
  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
      org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input, extensionRegistry);
  }

  @java.lang.Override
  public Builder newBuilderForType() { return newBuilder(); }
  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }
  public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }
  @java.lang.Override
  public Builder toBuilder() {
    // Avoid an unnecessary mergeFrom when converting the default instance.
    return this == DEFAULT_INSTANCE
        ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   * Protobuf type {@code hadoop.hdfs.ECTopologyVerifierResultProto}
   *
   * <p>Mutable companion builder; tracks field presence in its own
   * {@code bitField0_} and copies state into an immutable message on build().
   */
  public static final class Builder extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements
      // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ECTopologyVerifierResultProto)
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProtoOrBuilder {
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor;
    }
    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.Builder.class);
    }

    // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }

    private Builder(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }
    private void maybeForceBuilderInitialization() {
      // No message-typed fields here, so nothing to eagerly initialize.
      if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
              .alwaysUseFieldBuilders) {
      }
    }
    @java.lang.Override
    public Builder clear() {
      super.clear();
      resultMessage_ = "";
      bitField0_ = (bitField0_ & ~0x00000001);
      isSupported_ = false;
      bitField0_ = (bitField0_ & ~0x00000002);
      return this;
    }
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptorForType() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto getDefaultInstanceForType() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.getDefaultInstance();
    }

    // build() enforces the two required fields; buildPartial() does not.
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto build() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto buildPartial() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto(this);
      int from_bitField0_ = bitField0_;
      int to_bitField0_ = 0;
      if (((from_bitField0_ & 0x00000001) != 0)) {
        to_bitField0_ |= 0x00000001;
      }
      // String value is copied unconditionally; presence is what's gated.
      result.resultMessage_ = resultMessage_;
      if (((from_bitField0_ & 0x00000002) != 0)) {
        result.isSupported_ = isSupported_;
        to_bitField0_ |= 0x00000002;
      }
      result.bitField0_ = to_bitField0_;
      onBuilt();
      return result;
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }
    @java.lang.Override
    public Builder setField(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.setField(field, value);
    }
    @java.lang.Override
    public Builder clearField(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }
    @java.lang.Override
    public Builder clearOneof(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }
    @java.lang.Override
    public Builder setRepeatedField(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
        int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }
    @java.lang.Override
    public Builder addRepeatedField(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }
    @java.lang.Override
    public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
      if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto) {
        return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto)other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    // Field-wise merge: only fields present in 'other' overwrite this builder.
    public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto other) {
      if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.getDefaultInstance()) return this;
      if (other.hasResultMessage()) {
        bitField0_ |= 0x00000001;
        resultMessage_ = other.resultMessage_;
        onChanged();
      }
      if (other.hasIsSupported()) {
        setIsSupported(other.getIsSupported());
      }
      this.mergeUnknownFields(other.unknownFields);
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      if (!hasResultMessage()) {
        return false;
      }
      if (!hasIsSupported()) {
        return false;
      }
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parsedMessage = null;
      try {
        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
        // Keep whatever parsed before the failure, then rethrow as IOException.
        parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto) e.getUnfinishedMessage();
        throw e.unwrapIOException();
      } finally {
        if (parsedMessage != null) {
          mergeFrom(parsedMessage);
        }
      }
      return this;
    }
    // Presence bits mirror the message: 0x1 = resultMessage, 0x2 = isSupported.
    private int bitField0_;

    // String-or-ByteString holder, same lazy-decode scheme as the message.
    private java.lang.Object resultMessage_ = "";
    /**
     * required string resultMessage = 1;
     */
    public boolean hasResultMessage() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * required string resultMessage = 1;
     */
    public java.lang.String getResultMessage() {
      java.lang.Object ref = resultMessage_;
      if (!(ref instanceof java.lang.String)) {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs =
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          resultMessage_ = s;
        }
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     * required string resultMessage = 1;
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getResultMessageBytes() {
      java.lang.Object ref = resultMessage_;
      if (ref instanceof String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b =
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        resultMessage_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }
    /**
     * required string resultMessage = 1;
     */
    public Builder setResultMessage(
        java.lang.String value) {
      if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
      resultMessage_ = value;
      onChanged();
      return this;
    }
    /**
     * required string resultMessage = 1;
     */
    public Builder clearResultMessage() {
      bitField0_ = (bitField0_ & ~0x00000001);
      resultMessage_ = getDefaultInstance().getResultMessage();
      onChanged();
      return this;
    }
    /**
     * required string resultMessage = 1;
     */
    public Builder setResultMessageBytes(
        org.apache.hadoop.thirdparty.protobuf.ByteString value) {
      if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
      resultMessage_ = value;
      onChanged();
      return this;
    }

    private boolean isSupported_ ;
    /**
     * required bool isSupported = 2;
     */
    public boolean hasIsSupported() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * required bool isSupported = 2;
     */
    public boolean getIsSupported() {
      return isSupported_;
    }
    /**
     * required bool isSupported = 2;
     */
    public Builder setIsSupported(boolean value) {
      bitField0_ |= 0x00000002;
      isSupported_ = value;
      onChanged();
      return this;
    }
    /**
     * required bool isSupported = 2;
     */
    public Builder clearIsSupported() {
      bitField0_ = (bitField0_ & ~0x00000002);
      isSupported_ = false;
      onChanged();
      return this;
    }
    @java.lang.Override
    public final Builder setUnknownFields(
        final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }


    // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ECTopologyVerifierResultProto)
  }

  // @@protoc_insertion_point(class_scope:hadoop.hdfs.ECTopologyVerifierResultProto)
  private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto DEFAULT_INSTANCE;
  static {
    DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto();
  }

  public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  // Deprecated in favor of parser(); kept for generated-code compatibility.
  @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser
      PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() {
    @java.lang.Override
    public ECTopologyVerifierResultProto parsePartialFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return new ECTopologyVerifierResultProto(input, extensionRegistry);
    }
  };

  public static org.apache.hadoop.thirdparty.protobuf.Parser parser() {
    return PARSER;
  }

  @java.lang.Override
  public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }

}

// Read-only view over HdfsPathHandleProto (message or builder): an opaque
// handle identifying a file by inodeId/mtime/path (all optional).
public interface HdfsPathHandleProtoOrBuilder extends
    // @@protoc_insertion_point(interface_extends:hadoop.hdfs.HdfsPathHandleProto)
    org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

  /**
   * optional uint64 inodeId = 1;
   */
  boolean hasInodeId();
  /**
   * optional uint64 inodeId = 1;
   */
  long getInodeId();

  /**
   * optional uint64 mtime = 2;
   */
  boolean hasMtime();
  /**
   * optional uint64 mtime = 2;
   */
  long getMtime();

  /**
   * optional string path = 3;
   */
  boolean hasPath();
  /**
   * optional string path = 3;
   */
  java.lang.String getPath();
  /**
   * optional string path = 3;
   */
  org.apache.hadoop.thirdparty.protobuf.ByteString
      getPathBytes();
}
/** *
   **
   * Placeholder type for consistent HDFS operations.
   * 
* * Protobuf type {@code hadoop.hdfs.HdfsPathHandleProto} */ public static final class HdfsPathHandleProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.HdfsPathHandleProto) HdfsPathHandleProtoOrBuilder { private static final long serialVersionUID = 0L; // Use HdfsPathHandleProto.newBuilder() to construct. private HdfsPathHandleProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private HdfsPathHandleProto() { path_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private HdfsPathHandleProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; inodeId_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; mtime_ = input.readUInt64(); break; } case 26: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000004; path_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { 
this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.Builder.class); } private int bitField0_; public static final int INODEID_FIELD_NUMBER = 1; private long inodeId_; /** * optional uint64 inodeId = 1; */ public boolean hasInodeId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 inodeId = 1; */ public long getInodeId() { return inodeId_; } public static final int MTIME_FIELD_NUMBER = 2; private long mtime_; /** * optional uint64 mtime = 2; */ public boolean hasMtime() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 mtime = 2; */ public long getMtime() { return mtime_; } public static final int PATH_FIELD_NUMBER = 3; private volatile java.lang.Object path_; /** * optional string path = 3; */ public boolean hasPath() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string path = 3; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * optional string path = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes() { java.lang.Object 
ref = path_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, inodeId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, mtime_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, path_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, inodeId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, mtime_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, path_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto) obj; if 
(hasInodeId() != other.hasInodeId()) return false; if (hasInodeId()) { if (getInodeId() != other.getInodeId()) return false; } if (hasMtime() != other.hasMtime()) return false; if (hasMtime()) { if (getMtime() != other.getMtime()) return false; } if (hasPath() != other.hasPath()) return false; if (hasPath()) { if (!getPath() .equals(other.getPath())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasInodeId()) { hash = (37 * hash) + INODEID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getInodeId()); } if (hasMtime()) { hash = (37 * hash) + MTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getMtime()); } if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Placeholder type for consistent HDFS operations.
     * 
* * Protobuf type {@code hadoop.hdfs.HdfsPathHandleProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.HdfsPathHandleProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); inodeId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); mtime_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); path_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor; } @java.lang.Override public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.inodeId_ = inodeId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.mtime_ = mtime_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.path_ = path_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public 
Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.getDefaultInstance()) return this; if (other.hasInodeId()) { setInodeId(other.getInodeId()); } if (other.hasMtime()) { setMtime(other.getMtime()); } if (other.hasPath()) { bitField0_ |= 0x00000004; path_ = other.path_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long inodeId_ ; /** * optional uint64 inodeId = 1; */ public boolean hasInodeId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 inodeId = 1; */ public long getInodeId() { return 
inodeId_; } /** * optional uint64 inodeId = 1; */ public Builder setInodeId(long value) { bitField0_ |= 0x00000001; inodeId_ = value; onChanged(); return this; } /** * optional uint64 inodeId = 1; */ public Builder clearInodeId() { bitField0_ = (bitField0_ & ~0x00000001); inodeId_ = 0L; onChanged(); return this; } private long mtime_ ; /** * optional uint64 mtime = 2; */ public boolean hasMtime() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 mtime = 2; */ public long getMtime() { return mtime_; } /** * optional uint64 mtime = 2; */ public Builder setMtime(long value) { bitField0_ |= 0x00000002; mtime_ = value; onChanged(); return this; } /** * optional uint64 mtime = 2; */ public Builder clearMtime() { bitField0_ = (bitField0_ & ~0x00000002); mtime_ = 0L; onChanged(); return this; } private java.lang.Object path_ = ""; /** * optional string path = 3; */ public boolean hasPath() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string path = 3; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string path = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string path = 3; */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; path_ = value; onChanged(); return this; } /** * optional string path = 3; */ public 
Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000004); path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** * optional string path = 3; */ public Builder setPathBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; path_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HdfsPathHandleProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.HdfsPathHandleProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public HdfsPathHandleProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new HdfsPathHandleProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface HdfsFileStatusProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.HdfsFileStatusProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ boolean hasFileType(); /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType(); /** *
     * local name of inode encoded java UTF8
     * 
* * required bytes path = 2; */ boolean hasPath(); /** *
     * local name of inode encoded java UTF8
     * 
* * required bytes path = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getPath(); /** * required uint64 length = 3; */ boolean hasLength(); /** * required uint64 length = 3; */ long getLength(); /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ boolean hasPermission(); /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission(); /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder(); /** * required string owner = 5; */ boolean hasOwner(); /** * required string owner = 5; */ java.lang.String getOwner(); /** * required string owner = 5; */ org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes(); /** * required string group = 6; */ boolean hasGroup(); /** * required string group = 6; */ java.lang.String getGroup(); /** * required string group = 6; */ org.apache.hadoop.thirdparty.protobuf.ByteString getGroupBytes(); /** * required uint64 modification_time = 7; */ boolean hasModificationTime(); /** * required uint64 modification_time = 7; */ long getModificationTime(); /** * required uint64 access_time = 8; */ boolean hasAccessTime(); /** * required uint64 access_time = 8; */ long getAccessTime(); /** *
     * Optional fields for symlink
     * 
* * optional bytes symlink = 9; */ boolean hasSymlink(); /** *
     * Optional fields for symlink
     * 
* * optional bytes symlink = 9; */ org.apache.hadoop.thirdparty.protobuf.ByteString getSymlink(); /** *
     * Optional fields for file
     * 
* * optional uint32 block_replication = 10 [default = 0]; */ boolean hasBlockReplication(); /** *
     * Optional fields for file
     * 
* * optional uint32 block_replication = 10 [default = 0]; */ int getBlockReplication(); /** * optional uint64 blocksize = 11 [default = 0]; */ boolean hasBlocksize(); /** * optional uint64 blocksize = 11 [default = 0]; */ long getBlocksize(); /** *
      * supplied only if asked by client
     * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ boolean hasLocations(); /** *
      * supplied only if asked by client
     * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations(); /** *
      * supplied only if asked by client
     * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder(); /** *
     * Optional field for fileId
     * 
* * optional uint64 fileId = 13 [default = 0]; */ boolean hasFileId(); /** *
     * Optional field for fileId
     * 
* * optional uint64 fileId = 13 [default = 0]; */ long getFileId(); /** * optional int32 childrenNum = 14 [default = -1]; */ boolean hasChildrenNum(); /** * optional int32 childrenNum = 14 [default = -1]; */ int getChildrenNum(); /** *
     * Optional field for file encryption
     * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ boolean hasFileEncryptionInfo(); /** *
     * Optional field for file encryption
     * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo(); /** *
     * Optional field for file encryption
     * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder(); /** *
     * block storage policy id
     * 
* * optional uint32 storagePolicy = 16 [default = 0]; */ boolean hasStoragePolicy(); /** *
     * block storage policy id
     * 
* * optional uint32 storagePolicy = 16 [default = 0]; */ int getStoragePolicy(); /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ boolean hasEcPolicy(); /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy(); /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder(); /** *
     * Set of flags
     * 
* * optional uint32 flags = 18 [default = 0]; */ boolean hasFlags(); /** *
     * Set of flags
     * 
* * optional uint32 flags = 18 [default = 0]; */ int getFlags(); } /** *
   **
   * Status of a file, directory or symlink
   * Optionally includes a file's block locations if requested by client on the rpc call.
   * 
* * Protobuf type {@code hadoop.hdfs.HdfsFileStatusProto} */ public static final class HdfsFileStatusProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.HdfsFileStatusProto) HdfsFileStatusProtoOrBuilder { private static final long serialVersionUID = 0L; // Use HdfsFileStatusProto.newBuilder() to construct. private HdfsFileStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private HdfsFileStatusProto() { fileType_ = 1; path_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; owner_ = ""; group_ = ""; symlink_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; childrenNum_ = -1; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private HdfsFileStatusProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; fileType_ = rawValue; } break; } case 18: { bitField0_ |= 0x00000002; path_ = input.readBytes(); break; } case 24: { bitField0_ |= 
0x00000004; length_ = input.readUInt64(); break; } case 34: { org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder subBuilder = null; if (((bitField0_ & 0x00000008) != 0)) { subBuilder = permission_.toBuilder(); } permission_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(permission_); permission_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } case 42: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000010; owner_ = bs; break; } case 50: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000020; group_ = bs; break; } case 56: { bitField0_ |= 0x00000040; modificationTime_ = input.readUInt64(); break; } case 64: { bitField0_ |= 0x00000080; accessTime_ = input.readUInt64(); break; } case 74: { bitField0_ |= 0x00000100; symlink_ = input.readBytes(); break; } case 80: { bitField0_ |= 0x00000200; blockReplication_ = input.readUInt32(); break; } case 88: { bitField0_ |= 0x00000400; blocksize_ = input.readUInt64(); break; } case 98: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder subBuilder = null; if (((bitField0_ & 0x00000800) != 0)) { subBuilder = locations_.toBuilder(); } locations_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(locations_); locations_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000800; break; } case 104: { bitField0_ |= 0x00001000; fileId_ = input.readUInt64(); break; } case 112: { bitField0_ |= 0x00002000; childrenNum_ = input.readInt32(); break; } case 122: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00004000) != 0)) { subBuilder = fileEncryptionInfo_.toBuilder(); } fileEncryptionInfo_ = 
input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(fileEncryptionInfo_); fileEncryptionInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00004000; break; } case 128: { bitField0_ |= 0x00008000; storagePolicy_ = input.readUInt32(); break; } case 138: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder subBuilder = null; if (((bitField0_ & 0x00010000) != 0)) { subBuilder = ecPolicy_.toBuilder(); } ecPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(ecPolicy_); ecPolicy_ = subBuilder.buildPartial(); } bitField0_ |= 0x00010000; break; } case 144: { bitField0_ |= 0x00020000; flags_ = input.readUInt32(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class); } /** * Protobuf enum {@code hadoop.hdfs.HdfsFileStatusProto.FileType} */ public enum FileType implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * IS_DIR = 1; */ IS_DIR(1), /** * IS_FILE = 2; */ IS_FILE(2), /** * IS_SYMLINK = 3; */ IS_SYMLINK(3), ; /** * IS_DIR = 1; */ public static final int IS_DIR_VALUE = 1; /** * IS_FILE = 2; */ public static final int IS_FILE_VALUE = 2; /** * IS_SYMLINK = 3; */ public static final int IS_SYMLINK_VALUE = 3; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static FileType valueOf(int value) { return forNumber(value); } public static FileType forNumber(int value) { switch (value) { case 1: return IS_DIR; case 2: return IS_FILE; case 3: return IS_SYMLINK; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< FileType> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public FileType findValueByNumber(int number) { return FileType.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor().getEnumTypes().get(0); } private static final FileType[] VALUES = values(); public static FileType valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != 
getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private FileType(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.HdfsFileStatusProto.FileType) } /** * Protobuf enum {@code hadoop.hdfs.HdfsFileStatusProto.Flags} */ public enum Flags implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** *
       * has ACLs
       * 
* * HAS_ACL = 1; */ HAS_ACL(1), /** *
       * encrypted
       * 
* * HAS_CRYPT = 2; */ HAS_CRYPT(2), /** *
       * erasure coded
       * 
* * HAS_EC = 4; */ HAS_EC(4), /** *
       * SNAPSHOT ENABLED
       * 
* * SNAPSHOT_ENABLED = 8; */ SNAPSHOT_ENABLED(8), ; /** *
       * has ACLs
       * 
* * HAS_ACL = 1; */ public static final int HAS_ACL_VALUE = 1; /** *
       * encrypted
       * 
* * HAS_CRYPT = 2; */ public static final int HAS_CRYPT_VALUE = 2; /** *
       * erasure coded
       * 
* * HAS_EC = 4; */ public static final int HAS_EC_VALUE = 4; /** *
       * SNAPSHOT ENABLED
       * 
* * SNAPSHOT_ENABLED = 8; */ public static final int SNAPSHOT_ENABLED_VALUE = 8; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static Flags valueOf(int value) { return forNumber(value); } public static Flags forNumber(int value) { switch (value) { case 1: return HAS_ACL; case 2: return HAS_CRYPT; case 4: return HAS_EC; case 8: return SNAPSHOT_ENABLED; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< Flags> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public Flags findValueByNumber(int number) { return Flags.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor().getEnumTypes().get(1); } private static final Flags[] VALUES = values(); public static Flags valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private Flags(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.HdfsFileStatusProto.Flags) } private int bitField0_; public static final int FILETYPE_FIELD_NUMBER = 1; private int fileType_; /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 
1; */ public boolean hasFileType() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.valueOf(fileType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR : result; } public static final int PATH_FIELD_NUMBER = 2; private org.apache.hadoop.thirdparty.protobuf.ByteString path_; /** *
     * local name of inode encoded java UTF8
     * 
* * required bytes path = 2; */ public boolean hasPath() { return ((bitField0_ & 0x00000002) != 0); } /** *
     * local name of inode encoded java UTF8
     * 
* * required bytes path = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPath() { return path_; } public static final int LENGTH_FIELD_NUMBER = 3; private long length_; /** * required uint64 length = 3; */ public boolean hasLength() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 length = 3; */ public long getLength() { return length_; } public static final int PERMISSION_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_; /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public boolean hasPermission() { return ((bitField0_ & 0x00000008) != 0); } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() { return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { return permission_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } public static final int OWNER_FIELD_NUMBER = 5; private volatile java.lang.Object owner_; /** * required string owner = 5; */ public boolean hasOwner() { return ((bitField0_ & 0x00000010) != 0); } /** * required string owner = 5; */ public java.lang.String getOwner() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { owner_ = s; } return s; } } /** * required string owner = 5; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int GROUP_FIELD_NUMBER = 6; private volatile java.lang.Object group_; /** * required string group = 6; */ public boolean hasGroup() { return ((bitField0_ & 0x00000020) != 0); } /** * required string group = 6; */ public java.lang.String getGroup() { java.lang.Object ref = group_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { group_ = s; } return s; } } /** * required string group = 6; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getGroupBytes() { java.lang.Object ref = group_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( 
(java.lang.String) ref); group_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int MODIFICATION_TIME_FIELD_NUMBER = 7; private long modificationTime_; /** * required uint64 modification_time = 7; */ public boolean hasModificationTime() { return ((bitField0_ & 0x00000040) != 0); } /** * required uint64 modification_time = 7; */ public long getModificationTime() { return modificationTime_; } public static final int ACCESS_TIME_FIELD_NUMBER = 8; private long accessTime_; /** * required uint64 access_time = 8; */ public boolean hasAccessTime() { return ((bitField0_ & 0x00000080) != 0); } /** * required uint64 access_time = 8; */ public long getAccessTime() { return accessTime_; } public static final int SYMLINK_FIELD_NUMBER = 9; private org.apache.hadoop.thirdparty.protobuf.ByteString symlink_; /** *
     * Optional fields for symlink
     * 
* * optional bytes symlink = 9; */ public boolean hasSymlink() { return ((bitField0_ & 0x00000100) != 0); } /** *
     * Optional fields for symlink
     * 
* * optional bytes symlink = 9; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSymlink() { return symlink_; } public static final int BLOCK_REPLICATION_FIELD_NUMBER = 10; private int blockReplication_; /** *
     * Optional fields for file
     * 
* * optional uint32 block_replication = 10 [default = 0]; */ public boolean hasBlockReplication() { return ((bitField0_ & 0x00000200) != 0); } /** *
     * Optional fields for file
     * 
* * optional uint32 block_replication = 10 [default = 0]; */ public int getBlockReplication() { return blockReplication_; } public static final int BLOCKSIZE_FIELD_NUMBER = 11; private long blocksize_; /** * optional uint64 blocksize = 11 [default = 0]; */ public boolean hasBlocksize() { return ((bitField0_ & 0x00000400) != 0); } /** * optional uint64 blocksize = 11 [default = 0]; */ public long getBlocksize() { return blocksize_; } public static final int LOCATIONS_FIELD_NUMBER = 12; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_; /** *
     * supplied only if asked by client
     * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public boolean hasLocations() { return ((bitField0_ & 0x00000800) != 0); } /** *
     * supplied only if asked by client
     * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() { return locations_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance() : locations_; } /** *
     * supplied only if asked by client
     * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() { return locations_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance() : locations_; } public static final int FILEID_FIELD_NUMBER = 13; private long fileId_; /** *
     * Optional field for fileId
     * 
* * optional uint64 fileId = 13 [default = 0]; */ public boolean hasFileId() { return ((bitField0_ & 0x00001000) != 0); } /** *
     * Optional field for fileId
     * 
* * optional uint64 fileId = 13 [default = 0]; */ public long getFileId() { return fileId_; } public static final int CHILDRENNUM_FIELD_NUMBER = 14; private int childrenNum_; /** * optional int32 childrenNum = 14 [default = -1]; */ public boolean hasChildrenNum() { return ((bitField0_ & 0x00002000) != 0); } /** * optional int32 childrenNum = 14 [default = -1]; */ public int getChildrenNum() { return childrenNum_; } public static final int FILEENCRYPTIONINFO_FIELD_NUMBER = 15; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_; /** *
     * Optional field for file encryption
     * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public boolean hasFileEncryptionInfo() { return ((bitField0_ & 0x00004000) != 0); } /** *
     * Optional field for file encryption
     * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() { return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } /** *
     * Optional field for file encryption
     * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() { return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } public static final int STORAGEPOLICY_FIELD_NUMBER = 16; private int storagePolicy_; /** *
     * block storage policy id
     * 
* * optional uint32 storagePolicy = 16 [default = 0]; */ public boolean hasStoragePolicy() { return ((bitField0_ & 0x00008000) != 0); } /** *
     * block storage policy id
     * 
* * optional uint32 storagePolicy = 16 [default = 0]; */ public int getStoragePolicy() { return storagePolicy_; } public static final int ECPOLICY_FIELD_NUMBER = 17; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_; /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public boolean hasEcPolicy() { return ((bitField0_ & 0x00010000) != 0); } /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } /** *
     * Optional field for erasure coding
     * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } public static final int FLAGS_FIELD_NUMBER = 18; private int flags_; /** *
     * Set of flags
     * 
* * optional uint32 flags = 18 [default = 0]; */ public boolean hasFlags() { return ((bitField0_ & 0x00020000) != 0); } /** *
     * Set of flags
     * 
* * optional uint32 flags = 18 [default = 0]; */ public int getFlags() { return flags_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasFileType()) { memoizedIsInitialized = 0; return false; } if (!hasPath()) { memoizedIsInitialized = 0; return false; } if (!hasLength()) { memoizedIsInitialized = 0; return false; } if (!hasPermission()) { memoizedIsInitialized = 0; return false; } if (!hasOwner()) { memoizedIsInitialized = 0; return false; } if (!hasGroup()) { memoizedIsInitialized = 0; return false; } if (!hasModificationTime()) { memoizedIsInitialized = 0; return false; } if (!hasAccessTime()) { memoizedIsInitialized = 0; return false; } if (!getPermission().isInitialized()) { memoizedIsInitialized = 0; return false; } if (hasLocations()) { if (!getLocations().isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasEcPolicy()) { if (!getEcPolicy().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, fileType_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBytes(2, path_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, length_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeMessage(4, getPermission()); } if (((bitField0_ & 0x00000010) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, owner_); } if (((bitField0_ & 0x00000020) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, group_); } if 
(((bitField0_ & 0x00000040) != 0)) { output.writeUInt64(7, modificationTime_); } if (((bitField0_ & 0x00000080) != 0)) { output.writeUInt64(8, accessTime_); } if (((bitField0_ & 0x00000100) != 0)) { output.writeBytes(9, symlink_); } if (((bitField0_ & 0x00000200) != 0)) { output.writeUInt32(10, blockReplication_); } if (((bitField0_ & 0x00000400) != 0)) { output.writeUInt64(11, blocksize_); } if (((bitField0_ & 0x00000800) != 0)) { output.writeMessage(12, getLocations()); } if (((bitField0_ & 0x00001000) != 0)) { output.writeUInt64(13, fileId_); } if (((bitField0_ & 0x00002000) != 0)) { output.writeInt32(14, childrenNum_); } if (((bitField0_ & 0x00004000) != 0)) { output.writeMessage(15, getFileEncryptionInfo()); } if (((bitField0_ & 0x00008000) != 0)) { output.writeUInt32(16, storagePolicy_); } if (((bitField0_ & 0x00010000) != 0)) { output.writeMessage(17, getEcPolicy()); } if (((bitField0_ & 0x00020000) != 0)) { output.writeUInt32(18, flags_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, fileType_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(2, path_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, length_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, getPermission()); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, owner_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, group_); } if (((bitField0_ & 0x00000040) != 0)) { size += 
org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(7, modificationTime_); } if (((bitField0_ & 0x00000080) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(8, accessTime_); } if (((bitField0_ & 0x00000100) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(9, symlink_); } if (((bitField0_ & 0x00000200) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(10, blockReplication_); } if (((bitField0_ & 0x00000400) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(11, blocksize_); } if (((bitField0_ & 0x00000800) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(12, getLocations()); } if (((bitField0_ & 0x00001000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(13, fileId_); } if (((bitField0_ & 0x00002000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt32Size(14, childrenNum_); } if (((bitField0_ & 0x00004000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(15, getFileEncryptionInfo()); } if (((bitField0_ & 0x00008000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(16, storagePolicy_); } if (((bitField0_ & 0x00010000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(17, getEcPolicy()); } if (((bitField0_ & 0x00020000) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(18, flags_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)) { return super.equals(obj); } 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) obj; if (hasFileType() != other.hasFileType()) return false; if (hasFileType()) { if (fileType_ != other.fileType_) return false; } if (hasPath() != other.hasPath()) return false; if (hasPath()) { if (!getPath() .equals(other.getPath())) return false; } if (hasLength() != other.hasLength()) return false; if (hasLength()) { if (getLength() != other.getLength()) return false; } if (hasPermission() != other.hasPermission()) return false; if (hasPermission()) { if (!getPermission() .equals(other.getPermission())) return false; } if (hasOwner() != other.hasOwner()) return false; if (hasOwner()) { if (!getOwner() .equals(other.getOwner())) return false; } if (hasGroup() != other.hasGroup()) return false; if (hasGroup()) { if (!getGroup() .equals(other.getGroup())) return false; } if (hasModificationTime() != other.hasModificationTime()) return false; if (hasModificationTime()) { if (getModificationTime() != other.getModificationTime()) return false; } if (hasAccessTime() != other.hasAccessTime()) return false; if (hasAccessTime()) { if (getAccessTime() != other.getAccessTime()) return false; } if (hasSymlink() != other.hasSymlink()) return false; if (hasSymlink()) { if (!getSymlink() .equals(other.getSymlink())) return false; } if (hasBlockReplication() != other.hasBlockReplication()) return false; if (hasBlockReplication()) { if (getBlockReplication() != other.getBlockReplication()) return false; } if (hasBlocksize() != other.hasBlocksize()) return false; if (hasBlocksize()) { if (getBlocksize() != other.getBlocksize()) return false; } if (hasLocations() != other.hasLocations()) return false; if (hasLocations()) { if (!getLocations() .equals(other.getLocations())) return false; } if (hasFileId() != other.hasFileId()) return false; if (hasFileId()) { if (getFileId() != other.getFileId()) return false; } if (hasChildrenNum() 
!= other.hasChildrenNum()) return false; if (hasChildrenNum()) { if (getChildrenNum() != other.getChildrenNum()) return false; } if (hasFileEncryptionInfo() != other.hasFileEncryptionInfo()) return false; if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo() .equals(other.getFileEncryptionInfo())) return false; } if (hasStoragePolicy() != other.hasStoragePolicy()) return false; if (hasStoragePolicy()) { if (getStoragePolicy() != other.getStoragePolicy()) return false; } if (hasEcPolicy() != other.hasEcPolicy()) return false; if (hasEcPolicy()) { if (!getEcPolicy() .equals(other.getEcPolicy())) return false; } if (hasFlags() != other.hasFlags()) return false; if (hasFlags()) { if (getFlags() != other.getFlags()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasFileType()) { hash = (37 * hash) + FILETYPE_FIELD_NUMBER; hash = (53 * hash) + fileType_; } if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } if (hasLength()) { hash = (37 * hash) + LENGTH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLength()); } if (hasPermission()) { hash = (37 * hash) + PERMISSION_FIELD_NUMBER; hash = (53 * hash) + getPermission().hashCode(); } if (hasOwner()) { hash = (37 * hash) + OWNER_FIELD_NUMBER; hash = (53 * hash) + getOwner().hashCode(); } if (hasGroup()) { hash = (37 * hash) + GROUP_FIELD_NUMBER; hash = (53 * hash) + getGroup().hashCode(); } if (hasModificationTime()) { hash = (37 * hash) + MODIFICATION_TIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getModificationTime()); } if (hasAccessTime()) { hash = (37 * hash) + ACCESS_TIME_FIELD_NUMBER; hash = (53 * hash) + 
org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getAccessTime()); } if (hasSymlink()) { hash = (37 * hash) + SYMLINK_FIELD_NUMBER; hash = (53 * hash) + getSymlink().hashCode(); } if (hasBlockReplication()) { hash = (37 * hash) + BLOCK_REPLICATION_FIELD_NUMBER; hash = (53 * hash) + getBlockReplication(); } if (hasBlocksize()) { hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getBlocksize()); } if (hasLocations()) { hash = (37 * hash) + LOCATIONS_FIELD_NUMBER; hash = (53 * hash) + getLocations().hashCode(); } if (hasFileId()) { hash = (37 * hash) + FILEID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFileId()); } if (hasChildrenNum()) { hash = (37 * hash) + CHILDRENNUM_FIELD_NUMBER; hash = (53 * hash) + getChildrenNum(); } if (hasFileEncryptionInfo()) { hash = (37 * hash) + FILEENCRYPTIONINFO_FIELD_NUMBER; hash = (53 * hash) + getFileEncryptionInfo().hashCode(); } if (hasStoragePolicy()) { hash = (37 * hash) + STORAGEPOLICY_FIELD_NUMBER; hash = (53 * hash) + getStoragePolicy(); } if (hasEcPolicy()) { hash = (37 * hash) + ECPOLICY_FIELD_NUMBER; hash = (53 * hash) + getEcPolicy().hashCode(); } if (hasFlags()) { hash = (37 * hash) + FLAGS_FIELD_NUMBER; hash = (53 * hash) + getFlags(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
.parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Status of a file, directory or symlink
     * Optionally includes a file's block locations if requested by the client on the RPC call.
     * 
* * Protobuf type {@code hadoop.hdfs.HdfsFileStatusProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.HdfsFileStatusProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getPermissionFieldBuilder(); getLocationsFieldBuilder(); getFileEncryptionInfoFieldBuilder(); getEcPolicyFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); fileType_ = 1; bitField0_ = (bitField0_ & ~0x00000001); path_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); length_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); if (permissionBuilder_ == null) { permission_ = null; } else { permissionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); 
owner_ = ""; bitField0_ = (bitField0_ & ~0x00000010); group_ = ""; bitField0_ = (bitField0_ & ~0x00000020); modificationTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000040); accessTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000080); symlink_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000100); blockReplication_ = 0; bitField0_ = (bitField0_ & ~0x00000200); blocksize_ = 0L; bitField0_ = (bitField0_ & ~0x00000400); if (locationsBuilder_ == null) { locations_ = null; } else { locationsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000800); fileId_ = 0L; bitField0_ = (bitField0_ & ~0x00001000); childrenNum_ = -1; bitField0_ = (bitField0_ & ~0x00002000); if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = null; } else { fileEncryptionInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00004000); storagePolicy_ = 0; bitField0_ = (bitField0_ & ~0x00008000); if (ecPolicyBuilder_ == null) { ecPolicy_ = null; } else { ecPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00010000); flags_ = 0; bitField0_ = (bitField0_ & ~0x00020000); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto buildPartial() { 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.fileType_ = fileType_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.path_ = path_; if (((from_bitField0_ & 0x00000004) != 0)) { result.length_ = length_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { if (permissionBuilder_ == null) { result.permission_ = permission_; } else { result.permission_ = permissionBuilder_.build(); } to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { to_bitField0_ |= 0x00000010; } result.owner_ = owner_; if (((from_bitField0_ & 0x00000020) != 0)) { to_bitField0_ |= 0x00000020; } result.group_ = group_; if (((from_bitField0_ & 0x00000040) != 0)) { result.modificationTime_ = modificationTime_; to_bitField0_ |= 0x00000040; } if (((from_bitField0_ & 0x00000080) != 0)) { result.accessTime_ = accessTime_; to_bitField0_ |= 0x00000080; } if (((from_bitField0_ & 0x00000100) != 0)) { to_bitField0_ |= 0x00000100; } result.symlink_ = symlink_; if (((from_bitField0_ & 0x00000200) != 0)) { result.blockReplication_ = blockReplication_; to_bitField0_ |= 0x00000200; } if (((from_bitField0_ & 0x00000400) != 0)) { result.blocksize_ = blocksize_; to_bitField0_ |= 0x00000400; } if (((from_bitField0_ & 0x00000800) != 0)) { if (locationsBuilder_ == null) { result.locations_ = locations_; } else { result.locations_ = locationsBuilder_.build(); } to_bitField0_ |= 0x00000800; } if (((from_bitField0_ & 0x00001000) != 0)) { result.fileId_ = fileId_; to_bitField0_ |= 0x00001000; } if (((from_bitField0_ & 0x00002000) != 0)) { to_bitField0_ |= 0x00002000; } result.childrenNum_ = childrenNum_; if (((from_bitField0_ & 0x00004000) != 0)) { if (fileEncryptionInfoBuilder_ == null) { 
result.fileEncryptionInfo_ = fileEncryptionInfo_; } else { result.fileEncryptionInfo_ = fileEncryptionInfoBuilder_.build(); } to_bitField0_ |= 0x00004000; } if (((from_bitField0_ & 0x00008000) != 0)) { result.storagePolicy_ = storagePolicy_; to_bitField0_ |= 0x00008000; } if (((from_bitField0_ & 0x00010000) != 0)) { if (ecPolicyBuilder_ == null) { result.ecPolicy_ = ecPolicy_; } else { result.ecPolicy_ = ecPolicyBuilder_.build(); } to_bitField0_ |= 0x00010000; } if (((from_bitField0_ & 0x00020000) != 0)) { result.flags_ = flags_; to_bitField0_ |= 0x00020000; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) return this; if (other.hasFileType()) { setFileType(other.getFileType()); } if (other.hasPath()) { setPath(other.getPath()); } if (other.hasLength()) { setLength(other.getLength()); } if (other.hasPermission()) { mergePermission(other.getPermission()); } if (other.hasOwner()) { bitField0_ |= 0x00000010; owner_ = other.owner_; onChanged(); } if (other.hasGroup()) { bitField0_ |= 0x00000020; group_ = other.group_; onChanged(); } if (other.hasModificationTime()) { setModificationTime(other.getModificationTime()); } if (other.hasAccessTime()) { setAccessTime(other.getAccessTime()); } if (other.hasSymlink()) { setSymlink(other.getSymlink()); } if (other.hasBlockReplication()) { setBlockReplication(other.getBlockReplication()); } if (other.hasBlocksize()) { setBlocksize(other.getBlocksize()); } if (other.hasLocations()) { mergeLocations(other.getLocations()); } if (other.hasFileId()) { setFileId(other.getFileId()); } if (other.hasChildrenNum()) { setChildrenNum(other.getChildrenNum()); } if (other.hasFileEncryptionInfo()) { mergeFileEncryptionInfo(other.getFileEncryptionInfo()); } if (other.hasStoragePolicy()) { setStoragePolicy(other.getStoragePolicy()); } if (other.hasEcPolicy()) { mergeEcPolicy(other.getEcPolicy()); } if (other.hasFlags()) { setFlags(other.getFlags()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasFileType()) { return false; } if (!hasPath()) { return false; } if (!hasLength()) { return false; } if (!hasPermission()) { return false; } if (!hasOwner()) { return false; } if (!hasGroup()) { return false; } if (!hasModificationTime()) { return false; } if (!hasAccessTime()) { return false; } if (!getPermission().isInitialized()) { return false; } if (hasLocations()) { if 
(!getLocations().isInitialized()) { return false; } } if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo().isInitialized()) { return false; } } if (hasEcPolicy()) { if (!getEcPolicy().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int fileType_ = 1; /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ public boolean hasFileType() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.valueOf(fileType_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR : result; } /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ public Builder setFileType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; fileType_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ public Builder clearFileType() { bitField0_ = (bitField0_ & ~0x00000001); fileType_ = 1; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString path_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** *
       * local name of inode, encoded in Java UTF-8
       * 
* * required bytes path = 2; */ public boolean hasPath() { return ((bitField0_ & 0x00000002) != 0); } /** *
       * local name of inode, encoded in Java UTF-8
       * 
* * required bytes path = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getPath() { return path_; } /** *
       * local name of inode, encoded in Java UTF-8
       * 
* * required bytes path = 2; */ public Builder setPath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; path_ = value; onChanged(); return this; } /** *
       * local name of inode, encoded in Java UTF-8
       * 
* * required bytes path = 2; */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000002); path_ = getDefaultInstance().getPath(); onChanged(); return this; } private long length_ ; /** * required uint64 length = 3; */ public boolean hasLength() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint64 length = 3; */ public long getLength() { return length_; } /** * required uint64 length = 3; */ public Builder setLength(long value) { bitField0_ |= 0x00000004; length_ = value; onChanged(); return this; } /** * required uint64 length = 3; */ public Builder clearLength() { bitField0_ = (bitField0_ & ~0x00000004); length_ = 0L; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> permissionBuilder_; /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public boolean hasPermission() { return ((bitField0_ & 0x00000008) != 0); } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() { if (permissionBuilder_ == null) { return permission_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } else { return permissionBuilder_.getMessage(); } } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (permissionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } permission_ = value; onChanged(); } else { permissionBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public Builder setPermission( org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) { if (permissionBuilder_ == null) { permission_ = builderForValue.build(); onChanged(); } else { permissionBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (permissionBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0) && permission_ != null && permission_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) { permission_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.newBuilder(permission_).mergeFrom(value).buildPartial(); } else { permission_ = value; } onChanged(); } else { permissionBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public Builder clearPermission() { if (permissionBuilder_ == null) { permission_ = null; onChanged(); } else { permissionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getPermissionBuilder() { 
bitField0_ |= 0x00000008; onChanged(); return getPermissionFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { if (permissionBuilder_ != null) { return permissionBuilder_.getMessageOrBuilder(); } else { return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> getPermissionFieldBuilder() { if (permissionBuilder_ == null) { permissionBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>( getPermission(), getParentForChildren(), isClean()); permission_ = null; } return permissionBuilder_; } private java.lang.Object owner_ = ""; /** * required string owner = 5; */ public boolean hasOwner() { return ((bitField0_ & 0x00000010) != 0); } /** * required string owner = 5; */ public java.lang.String getOwner() { java.lang.Object ref = owner_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { owner_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string owner = 5; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes() { java.lang.Object ref = owner_; if (ref 
instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string owner = 5; */ public Builder setOwner( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; owner_ = value; onChanged(); return this; } /** * required string owner = 5; */ public Builder clearOwner() { bitField0_ = (bitField0_ & ~0x00000010); owner_ = getDefaultInstance().getOwner(); onChanged(); return this; } /** * required string owner = 5; */ public Builder setOwnerBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; owner_ = value; onChanged(); return this; } private java.lang.Object group_ = ""; /** * required string group = 6; */ public boolean hasGroup() { return ((bitField0_ & 0x00000020) != 0); } /** * required string group = 6; */ public java.lang.String getGroup() { java.lang.Object ref = group_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { group_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string group = 6; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getGroupBytes() { java.lang.Object ref = group_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); group_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string group = 6; */ public Builder setGroup( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ 
|= 0x00000020; group_ = value; onChanged(); return this; } /** * required string group = 6; */ public Builder clearGroup() { bitField0_ = (bitField0_ & ~0x00000020); group_ = getDefaultInstance().getGroup(); onChanged(); return this; } /** * required string group = 6; */ public Builder setGroupBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; group_ = value; onChanged(); return this; } private long modificationTime_ ; /** * required uint64 modification_time = 7; */ public boolean hasModificationTime() { return ((bitField0_ & 0x00000040) != 0); } /** * required uint64 modification_time = 7; */ public long getModificationTime() { return modificationTime_; } /** * required uint64 modification_time = 7; */ public Builder setModificationTime(long value) { bitField0_ |= 0x00000040; modificationTime_ = value; onChanged(); return this; } /** * required uint64 modification_time = 7; */ public Builder clearModificationTime() { bitField0_ = (bitField0_ & ~0x00000040); modificationTime_ = 0L; onChanged(); return this; } private long accessTime_ ; /** * required uint64 access_time = 8; */ public boolean hasAccessTime() { return ((bitField0_ & 0x00000080) != 0); } /** * required uint64 access_time = 8; */ public long getAccessTime() { return accessTime_; } /** * required uint64 access_time = 8; */ public Builder setAccessTime(long value) { bitField0_ |= 0x00000080; accessTime_ = value; onChanged(); return this; } /** * required uint64 access_time = 8; */ public Builder clearAccessTime() { bitField0_ = (bitField0_ & ~0x00000080); accessTime_ = 0L; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString symlink_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** *
       * Optional fields for symlink
       * 
* * optional bytes symlink = 9; */ public boolean hasSymlink() { return ((bitField0_ & 0x00000100) != 0); } /** *
       * Optional fields for symlink
       * 
* * optional bytes symlink = 9; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSymlink() { return symlink_; } /** *
       * Optional fields for symlink
       * 
* * optional bytes symlink = 9; */ public Builder setSymlink(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000100; symlink_ = value; onChanged(); return this; } /** *
       * Optional fields for symlink
       * 
* * optional bytes symlink = 9; */ public Builder clearSymlink() { bitField0_ = (bitField0_ & ~0x00000100); symlink_ = getDefaultInstance().getSymlink(); onChanged(); return this; } private int blockReplication_ ; /** *
       * Optional fields for file
       * 
* * optional uint32 block_replication = 10 [default = 0]; */ public boolean hasBlockReplication() { return ((bitField0_ & 0x00000200) != 0); } /** *
       * Optional fields for file
       * 
* * optional uint32 block_replication = 10 [default = 0]; */ public int getBlockReplication() { return blockReplication_; } /** *
       * Optional fields for file
       * 
* * optional uint32 block_replication = 10 [default = 0]; */ public Builder setBlockReplication(int value) { bitField0_ |= 0x00000200; blockReplication_ = value; onChanged(); return this; } /** *
       * Optional fields for file
       * 
* * optional uint32 block_replication = 10 [default = 0]; */ public Builder clearBlockReplication() { bitField0_ = (bitField0_ & ~0x00000200); blockReplication_ = 0; onChanged(); return this; } private long blocksize_ ; /** * optional uint64 blocksize = 11 [default = 0]; */ public boolean hasBlocksize() { return ((bitField0_ & 0x00000400) != 0); } /** * optional uint64 blocksize = 11 [default = 0]; */ public long getBlocksize() { return blocksize_; } /** * optional uint64 blocksize = 11 [default = 0]; */ public Builder setBlocksize(long value) { bitField0_ |= 0x00000400; blocksize_ = value; onChanged(); return this; } /** * optional uint64 blocksize = 11 [default = 0]; */ public Builder clearBlocksize() { bitField0_ = (bitField0_ & ~0x00000400); blocksize_ = 0L; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> locationsBuilder_; /** *
       * supplied only if asked by client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public boolean hasLocations() { return ((bitField0_ & 0x00000800) != 0); } /** *
       * supplied only if asked by client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() { if (locationsBuilder_ == null) { return locations_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance() : locations_; } else { return locationsBuilder_.getMessage(); } } /** *
       * supplied only if asked by client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public Builder setLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) { if (locationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } locations_ = value; onChanged(); } else { locationsBuilder_.setMessage(value); } bitField0_ |= 0x00000800; return this; } /** *
       * supplied only if asked by client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public Builder setLocations( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder builderForValue) { if (locationsBuilder_ == null) { locations_ = builderForValue.build(); onChanged(); } else { locationsBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000800; return this; } /** *
       * supplied only if asked by client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public Builder mergeLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) { if (locationsBuilder_ == null) { if (((bitField0_ & 0x00000800) != 0) && locations_ != null && locations_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) { locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder(locations_).mergeFrom(value).buildPartial(); } else { locations_ = value; } onChanged(); } else { locationsBuilder_.mergeFrom(value); } bitField0_ |= 0x00000800; return this; } /** *
       * supplied only if asked by client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public Builder clearLocations() { if (locationsBuilder_ == null) { locations_ = null; onChanged(); } else { locationsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000800); return this; } /** *
       * supplied only if asked by client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder getLocationsBuilder() { bitField0_ |= 0x00000800; onChanged(); return getLocationsFieldBuilder().getBuilder(); } /** *
       * supplied only if asked by client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() { if (locationsBuilder_ != null) { return locationsBuilder_.getMessageOrBuilder(); } else { return locations_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance() : locations_; } } /** *
       * supplied only if asked by client
       * 
* * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> getLocationsFieldBuilder() { if (locationsBuilder_ == null) { locationsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder>( getLocations(), getParentForChildren(), isClean()); locations_ = null; } return locationsBuilder_; } private long fileId_ ; /** *
       * Optional field for fileId
       * 
* * optional uint64 fileId = 13 [default = 0]; */ public boolean hasFileId() { return ((bitField0_ & 0x00001000) != 0); } /** *
       * Optional field for fileId
       * 
* * optional uint64 fileId = 13 [default = 0]; */ public long getFileId() { return fileId_; } /** *
       * Optional field for fileId
       * 
* * optional uint64 fileId = 13 [default = 0]; */ public Builder setFileId(long value) { bitField0_ |= 0x00001000; fileId_ = value; onChanged(); return this; } /** *
       * Optional field for fileId
       * 
* * optional uint64 fileId = 13 [default = 0]; */ public Builder clearFileId() { bitField0_ = (bitField0_ & ~0x00001000); fileId_ = 0L; onChanged(); return this; } private int childrenNum_ = -1; /** * optional int32 childrenNum = 14 [default = -1]; */ public boolean hasChildrenNum() { return ((bitField0_ & 0x00002000) != 0); } /** * optional int32 childrenNum = 14 [default = -1]; */ public int getChildrenNum() { return childrenNum_; } /** * optional int32 childrenNum = 14 [default = -1]; */ public Builder setChildrenNum(int value) { bitField0_ |= 0x00002000; childrenNum_ = value; onChanged(); return this; } /** * optional int32 childrenNum = 14 [default = -1]; */ public Builder clearChildrenNum() { bitField0_ = (bitField0_ & ~0x00002000); childrenNum_ = -1; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> fileEncryptionInfoBuilder_; /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public boolean hasFileEncryptionInfo() { return ((bitField0_ & 0x00004000) != 0); } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() { if (fileEncryptionInfoBuilder_ == null) { return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } else { return fileEncryptionInfoBuilder_.getMessage(); } } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public Builder setFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) { if (fileEncryptionInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } fileEncryptionInfo_ = value; onChanged(); } else { fileEncryptionInfoBuilder_.setMessage(value); } bitField0_ |= 0x00004000; return this; } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public Builder setFileEncryptionInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder builderForValue) { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = builderForValue.build(); onChanged(); } else { fileEncryptionInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00004000; return this; } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public Builder mergeFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) { if (fileEncryptionInfoBuilder_ == null) { if (((bitField0_ & 0x00004000) != 0) && fileEncryptionInfo_ != null && fileEncryptionInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) { fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder(fileEncryptionInfo_).mergeFrom(value).buildPartial(); } else { fileEncryptionInfo_ = value; } onChanged(); } else { fileEncryptionInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00004000; return this; } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public Builder clearFileEncryptionInfo() { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = null; onChanged(); } else { fileEncryptionInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00004000); return this; } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder getFileEncryptionInfoBuilder() { bitField0_ |= 0x00004000; onChanged(); return getFileEncryptionInfoFieldBuilder().getBuilder(); } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() { if (fileEncryptionInfoBuilder_ != null) { return fileEncryptionInfoBuilder_.getMessageOrBuilder(); } else { return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_; } } /** *
       * Optional field for file encryption
       * 
* * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> getFileEncryptionInfoFieldBuilder() { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>( getFileEncryptionInfo(), getParentForChildren(), isClean()); fileEncryptionInfo_ = null; } return fileEncryptionInfoBuilder_; } private int storagePolicy_ ; /** *
       * block storage policy id
       * 
* * optional uint32 storagePolicy = 16 [default = 0]; */ public boolean hasStoragePolicy() { return ((bitField0_ & 0x00008000) != 0); } /** *
       * block storage policy id
       * 
* * optional uint32 storagePolicy = 16 [default = 0]; */ public int getStoragePolicy() { return storagePolicy_; } /** *
       * block storage policy id
       * 
* * optional uint32 storagePolicy = 16 [default = 0]; */ public Builder setStoragePolicy(int value) { bitField0_ |= 0x00008000; storagePolicy_ = value; onChanged(); return this; } /** *
       * block storage policy id
       * 
* * optional uint32 storagePolicy = 16 [default = 0]; */ public Builder clearStoragePolicy() { bitField0_ = (bitField0_ & ~0x00008000); storagePolicy_ = 0; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> ecPolicyBuilder_; /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public boolean hasEcPolicy() { return ((bitField0_ & 0x00010000) != 0); } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() { if (ecPolicyBuilder_ == null) { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } else { return ecPolicyBuilder_.getMessage(); } } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public Builder setEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (ecPolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ecPolicy_ = value; onChanged(); } else { ecPolicyBuilder_.setMessage(value); } bitField0_ |= 0x00010000; return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public Builder setEcPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) { if (ecPolicyBuilder_ == null) { ecPolicy_ = builderForValue.build(); onChanged(); } else { ecPolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00010000; return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public Builder mergeEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (ecPolicyBuilder_ == null) { if (((bitField0_ & 0x00010000) != 0) && ecPolicy_ != null && ecPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) { ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.newBuilder(ecPolicy_).mergeFrom(value).buildPartial(); } else { ecPolicy_ = value; } onChanged(); } else { ecPolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00010000; return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public Builder clearEcPolicy() { if (ecPolicyBuilder_ == null) { ecPolicy_ = null; onChanged(); } else { ecPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00010000); return this; } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getEcPolicyBuilder() { bitField0_ |= 0x00010000; onChanged(); return getEcPolicyFieldBuilder().getBuilder(); } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() { if (ecPolicyBuilder_ != null) { return ecPolicyBuilder_.getMessageOrBuilder(); } else { return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_; } } /** *
       * Optional field for erasure coding
       * 
* * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> getEcPolicyFieldBuilder() { if (ecPolicyBuilder_ == null) { ecPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>( getEcPolicy(), getParentForChildren(), isClean()); ecPolicy_ = null; } return ecPolicyBuilder_; } private int flags_ ; /** *
       * Set of flags
       * 
* * optional uint32 flags = 18 [default = 0]; */ public boolean hasFlags() { return ((bitField0_ & 0x00020000) != 0); } /** *
       * Set of flags
       * 
* * optional uint32 flags = 18 [default = 0]; */ public int getFlags() { return flags_; } /** *
       * Set of flags
       * 
* * optional uint32 flags = 18 [default = 0]; */ public Builder setFlags(int value) { bitField0_ |= 0x00020000; flags_ = value; onChanged(); return this; } /** *
       * Set of flags
       * 
* * optional uint32 flags = 18 [default = 0]; */ public Builder clearFlags() { bitField0_ = (bitField0_ & ~0x00020000); flags_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HdfsFileStatusProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.HdfsFileStatusProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public HdfsFileStatusProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new HdfsFileStatusProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface BlockChecksumOptionsProtoOrBuilder extends // 
@@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockChecksumOptionsProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; */ boolean hasBlockChecksumType(); /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto getBlockChecksumType(); /** *
     * Only used if blockChecksumType specifies a striped format, such as
     * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
     * to be the concatenation of N crcs, where
     * N == ((requestedLength - 1) / stripedLength) + 1
     * 
* * optional uint64 stripeLength = 2; */ boolean hasStripeLength(); /** *
     * Only used if blockChecksumType specifies a striped format, such as
     * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
     * to be the concatenation of N crcs, where
     * N == ((requestedLength - 1) / stripedLength) + 1
     * 
* * optional uint64 stripeLength = 2; */ long getStripeLength(); } /** *
   **
   * Algorithms/types denoting how block-level checksums are computed using
   * lower-level chunk checksums/CRCs.
   * These options should be kept in sync with
   * org.apache.hadoop.hdfs.protocol.BlockChecksumOptions.
   * 
* * Protobuf type {@code hadoop.hdfs.BlockChecksumOptionsProto} */ public static final class BlockChecksumOptionsProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockChecksumOptionsProto) BlockChecksumOptionsProtoOrBuilder { private static final long serialVersionUID = 0L; // Use BlockChecksumOptionsProto.newBuilder() to construct. private BlockChecksumOptionsProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private BlockChecksumOptionsProto() { blockChecksumType_ = 1; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private BlockChecksumOptionsProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; blockChecksumType_ = rawValue; } break; } case 16: { bitField0_ |= 0x00000002; stripeLength_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch 
(org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder.class); } private int bitField0_; public static final int BLOCKCHECKSUMTYPE_FIELD_NUMBER = 1; private int blockChecksumType_; /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; */ public boolean hasBlockChecksumType() { return ((bitField0_ & 0x00000001) != 0); } /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto getBlockChecksumType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.valueOf(blockChecksumType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.MD5CRC : result; } public static final int STRIPELENGTH_FIELD_NUMBER = 2; private long stripeLength_; /** *
     * Only used if blockChecksumType specifies a striped format, such as
     * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
     * to be the concatenation of N crcs, where
     * N == ((requestedLength - 1) / stripedLength) + 1
     * 
* * optional uint64 stripeLength = 2; */ public boolean hasStripeLength() { return ((bitField0_ & 0x00000002) != 0); } /** *
     * Only used if blockChecksumType specifies a striped format, such as
     * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
     * to be the concatenation of N crcs, where
     * N == ((requestedLength - 1) / stripedLength) + 1
     * 
* * optional uint64 stripeLength = 2; */ public long getStripeLength() { return stripeLength_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, blockChecksumType_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, stripeLength_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, blockChecksumType_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, stripeLength_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto) obj; if (hasBlockChecksumType() != other.hasBlockChecksumType()) return false; if (hasBlockChecksumType()) { if (blockChecksumType_ != other.blockChecksumType_) return false; } if (hasStripeLength() != other.hasStripeLength()) return false; if (hasStripeLength()) { if (getStripeLength() != other.getStripeLength()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override 
public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasBlockChecksumType()) { hash = (37 * hash) + BLOCKCHECKSUMTYPE_FIELD_NUMBER; hash = (53 * hash) + blockChecksumType_; } if (hasStripeLength()) { hash = (37 * hash) + STRIPELENGTH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getStripeLength()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Algorithms/types denoting how block-level checksums are computed using
     * lower-level chunk checksums/CRCs.
     * These options should be kept in sync with
     * org.apache.hadoop.hdfs.protocol.BlockChecksumOptions.
     * 
* * Protobuf type {@code hadoop.hdfs.BlockChecksumOptionsProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockChecksumOptionsProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); blockChecksumType_ = 1; bitField0_ = (bitField0_ & ~0x00000001); stripeLength_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor; } @java.lang.Override public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.blockChecksumType_ = blockChecksumType_; if (((from_bitField0_ & 0x00000002) != 0)) { result.stripeLength_ = stripeLength_; to_bitField0_ |= 0x00000002; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder 
addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) return this; if (other.hasBlockChecksumType()) { setBlockChecksumType(other.getBlockChecksumType()); } if (other.hasStripeLength()) { setStripeLength(other.getStripeLength()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int blockChecksumType_ = 1; /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; */ public boolean hasBlockChecksumType() { return ((bitField0_ & 0x00000001) != 0); 
} /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto getBlockChecksumType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.valueOf(blockChecksumType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.MD5CRC : result; } /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; */ public Builder setBlockChecksumType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; blockChecksumType_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC]; */ public Builder clearBlockChecksumType() { bitField0_ = (bitField0_ & ~0x00000001); blockChecksumType_ = 1; onChanged(); return this; } private long stripeLength_ ; /** *
       * Only used if blockChecksumType specifies a striped format, such as
       * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
       * to be the concatenation of N crcs, where
       * N == ((requestedLength - 1) / stripedLength) + 1
       * 
* * optional uint64 stripeLength = 2; */ public boolean hasStripeLength() { return ((bitField0_ & 0x00000002) != 0); } /** *
       * Only used if blockChecksumType specifies a striped format, such as
       * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
       * to be the concatenation of N crcs, where
       * N == ((requestedLength - 1) / stripedLength) + 1
       * 
* * optional uint64 stripeLength = 2; */ public long getStripeLength() { return stripeLength_; } /** *
       * Only used if blockChecksumType specifies a striped format, such as
       * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
       * to be the concatenation of N crcs, where
       * N == ((requestedLength - 1) / stripedLength) + 1
       * 
* * optional uint64 stripeLength = 2; */ public Builder setStripeLength(long value) { bitField0_ |= 0x00000002; stripeLength_ = value; onChanged(); return this; } /** *
       * Only used if blockChecksumType specifies a striped format, such as
       * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
       * to be the concatenation of N crcs, where
       * N == ((requestedLength - 1) / stripedLength) + 1
       * 
* * optional uint64 stripeLength = 2; */ public Builder clearStripeLength() { bitField0_ = (bitField0_ & ~0x00000002); stripeLength_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockChecksumOptionsProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockChecksumOptionsProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public BlockChecksumOptionsProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new BlockChecksumOptionsProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface FsServerDefaultsProtoOrBuilder extends // 
@@protoc_insertion_point(interface_extends:hadoop.hdfs.FsServerDefaultsProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint64 blockSize = 1; */ boolean hasBlockSize(); /** * required uint64 blockSize = 1; */ long getBlockSize(); /** * required uint32 bytesPerChecksum = 2; */ boolean hasBytesPerChecksum(); /** * required uint32 bytesPerChecksum = 2; */ int getBytesPerChecksum(); /** * required uint32 writePacketSize = 3; */ boolean hasWritePacketSize(); /** * required uint32 writePacketSize = 3; */ int getWritePacketSize(); /** *
     * Actually a short - only 16 bits used
     * 
* * required uint32 replication = 4; */ boolean hasReplication(); /** *
     * Actually a short - only 16 bits used
     * 
* * required uint32 replication = 4; */ int getReplication(); /** * required uint32 fileBufferSize = 5; */ boolean hasFileBufferSize(); /** * required uint32 fileBufferSize = 5; */ int getFileBufferSize(); /** * optional bool encryptDataTransfer = 6 [default = false]; */ boolean hasEncryptDataTransfer(); /** * optional bool encryptDataTransfer = 6 [default = false]; */ boolean getEncryptDataTransfer(); /** * optional uint64 trashInterval = 7 [default = 0]; */ boolean hasTrashInterval(); /** * optional uint64 trashInterval = 7 [default = 0]; */ long getTrashInterval(); /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ boolean hasChecksumType(); /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType(); /** * optional string keyProviderUri = 9; */ boolean hasKeyProviderUri(); /** * optional string keyProviderUri = 9; */ java.lang.String getKeyProviderUri(); /** * optional string keyProviderUri = 9; */ org.apache.hadoop.thirdparty.protobuf.ByteString getKeyProviderUriBytes(); /** * optional uint32 policyId = 10 [default = 0]; */ boolean hasPolicyId(); /** * optional uint32 policyId = 10 [default = 0]; */ int getPolicyId(); } /** *
   **
   * HDFS Server Defaults
   * 
* * Protobuf type {@code hadoop.hdfs.FsServerDefaultsProto} */ public static final class FsServerDefaultsProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.FsServerDefaultsProto) FsServerDefaultsProtoOrBuilder { private static final long serialVersionUID = 0L; // Use FsServerDefaultsProto.newBuilder() to construct. private FsServerDefaultsProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private FsServerDefaultsProto() { checksumType_ = 1; keyProviderUri_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FsServerDefaultsProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; blockSize_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; bytesPerChecksum_ = input.readUInt32(); break; } case 24: { bitField0_ |= 0x00000004; writePacketSize_ = input.readUInt32(); break; } case 32: { bitField0_ |= 0x00000008; replication_ = input.readUInt32(); break; } case 40: { bitField0_ |= 0x00000010; fileBufferSize_ = input.readUInt32(); break; } case 48: { bitField0_ |= 0x00000020; encryptDataTransfer_ = input.readBool(); break; } case 56: { bitField0_ |= 0x00000040; trashInterval_ = input.readUInt64(); break; } case 64: { int rawValue = input.readEnum(); 
@SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(8, rawValue); } else { bitField0_ |= 0x00000080; checksumType_ = rawValue; } break; } case 74: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000100; keyProviderUri_ = bs; break; } case 80: { bitField0_ |= 0x00000200; policyId_ = input.readUInt32(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class); } private int bitField0_; public static final int BLOCKSIZE_FIELD_NUMBER = 1; private long blockSize_; /** * required uint64 blockSize = 1; */ public boolean hasBlockSize() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 blockSize = 1; */ public long getBlockSize() { return 
blockSize_; } public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2; private int bytesPerChecksum_; /** * required uint32 bytesPerChecksum = 2; */ public boolean hasBytesPerChecksum() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 bytesPerChecksum = 2; */ public int getBytesPerChecksum() { return bytesPerChecksum_; } public static final int WRITEPACKETSIZE_FIELD_NUMBER = 3; private int writePacketSize_; /** * required uint32 writePacketSize = 3; */ public boolean hasWritePacketSize() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint32 writePacketSize = 3; */ public int getWritePacketSize() { return writePacketSize_; } public static final int REPLICATION_FIELD_NUMBER = 4; private int replication_; /** *
     * Actually a short - only 16 bits used
     * 
* * required uint32 replication = 4; */ public boolean hasReplication() { return ((bitField0_ & 0x00000008) != 0); } /** *
     * Actually a short - only 16 bits used
     * 
* * required uint32 replication = 4; */ public int getReplication() { return replication_; } public static final int FILEBUFFERSIZE_FIELD_NUMBER = 5; private int fileBufferSize_; /** * required uint32 fileBufferSize = 5; */ public boolean hasFileBufferSize() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint32 fileBufferSize = 5; */ public int getFileBufferSize() { return fileBufferSize_; } public static final int ENCRYPTDATATRANSFER_FIELD_NUMBER = 6; private boolean encryptDataTransfer_; /** * optional bool encryptDataTransfer = 6 [default = false]; */ public boolean hasEncryptDataTransfer() { return ((bitField0_ & 0x00000020) != 0); } /** * optional bool encryptDataTransfer = 6 [default = false]; */ public boolean getEncryptDataTransfer() { return encryptDataTransfer_; } public static final int TRASHINTERVAL_FIELD_NUMBER = 7; private long trashInterval_; /** * optional uint64 trashInterval = 7 [default = 0]; */ public boolean hasTrashInterval() { return ((bitField0_ & 0x00000040) != 0); } /** * optional uint64 trashInterval = 7 [default = 0]; */ public long getTrashInterval() { return trashInterval_; } public static final int CHECKSUMTYPE_FIELD_NUMBER = 8; private int checksumType_; /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ public boolean hasChecksumType() { return ((bitField0_ & 0x00000080) != 0); } /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(checksumType_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32 : result; } public static final int KEYPROVIDERURI_FIELD_NUMBER = 9; private volatile java.lang.Object keyProviderUri_; /** * optional string keyProviderUri = 9; */ public boolean hasKeyProviderUri() { return ((bitField0_ & 0x00000100) != 0); } /** * optional string keyProviderUri = 9; */ public java.lang.String getKeyProviderUri() { java.lang.Object ref = keyProviderUri_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyProviderUri_ = s; } return s; } } /** * optional string keyProviderUri = 9; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyProviderUriBytes() { java.lang.Object ref = keyProviderUri_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyProviderUri_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int POLICYID_FIELD_NUMBER = 10; private int policyId_; /** * optional uint32 policyId = 10 [default = 0]; */ public boolean hasPolicyId() { return ((bitField0_ & 0x00000200) != 0); } /** * optional uint32 policyId = 10 [default = 0]; */ public int getPolicyId() { return policyId_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasBlockSize()) { memoizedIsInitialized = 0; return false; } if (!hasBytesPerChecksum()) { memoizedIsInitialized = 0; return false; } if (!hasWritePacketSize()) { memoizedIsInitialized = 0; return false; } if (!hasReplication()) { memoizedIsInitialized = 0; return 
false; } if (!hasFileBufferSize()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, blockSize_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, bytesPerChecksum_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt32(3, writePacketSize_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt32(4, replication_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt32(5, fileBufferSize_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeBool(6, encryptDataTransfer_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeUInt64(7, trashInterval_); } if (((bitField0_ & 0x00000080) != 0)) { output.writeEnum(8, checksumType_); } if (((bitField0_ & 0x00000100) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 9, keyProviderUri_); } if (((bitField0_ & 0x00000200) != 0)) { output.writeUInt32(10, policyId_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, blockSize_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, bytesPerChecksum_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(3, writePacketSize_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(4, replication_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(5, fileBufferSize_); } if 
(((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(6, encryptDataTransfer_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(7, trashInterval_); } if (((bitField0_ & 0x00000080) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(8, checksumType_); } if (((bitField0_ & 0x00000100) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(9, keyProviderUri_); } if (((bitField0_ & 0x00000200) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(10, policyId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) obj; if (hasBlockSize() != other.hasBlockSize()) return false; if (hasBlockSize()) { if (getBlockSize() != other.getBlockSize()) return false; } if (hasBytesPerChecksum() != other.hasBytesPerChecksum()) return false; if (hasBytesPerChecksum()) { if (getBytesPerChecksum() != other.getBytesPerChecksum()) return false; } if (hasWritePacketSize() != other.hasWritePacketSize()) return false; if (hasWritePacketSize()) { if (getWritePacketSize() != other.getWritePacketSize()) return false; } if (hasReplication() != other.hasReplication()) return false; if (hasReplication()) { if (getReplication() != other.getReplication()) return false; } if (hasFileBufferSize() != other.hasFileBufferSize()) return false; if (hasFileBufferSize()) { if (getFileBufferSize() != other.getFileBufferSize()) return false; } if 
(hasEncryptDataTransfer() != other.hasEncryptDataTransfer()) return false; if (hasEncryptDataTransfer()) { if (getEncryptDataTransfer() != other.getEncryptDataTransfer()) return false; } if (hasTrashInterval() != other.hasTrashInterval()) return false; if (hasTrashInterval()) { if (getTrashInterval() != other.getTrashInterval()) return false; } if (hasChecksumType() != other.hasChecksumType()) return false; if (hasChecksumType()) { if (checksumType_ != other.checksumType_) return false; } if (hasKeyProviderUri() != other.hasKeyProviderUri()) return false; if (hasKeyProviderUri()) { if (!getKeyProviderUri() .equals(other.getKeyProviderUri())) return false; } if (hasPolicyId() != other.hasPolicyId()) return false; if (hasPolicyId()) { if (getPolicyId() != other.getPolicyId()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasBlockSize()) { hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getBlockSize()); } if (hasBytesPerChecksum()) { hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER; hash = (53 * hash) + getBytesPerChecksum(); } if (hasWritePacketSize()) { hash = (37 * hash) + WRITEPACKETSIZE_FIELD_NUMBER; hash = (53 * hash) + getWritePacketSize(); } if (hasReplication()) { hash = (37 * hash) + REPLICATION_FIELD_NUMBER; hash = (53 * hash) + getReplication(); } if (hasFileBufferSize()) { hash = (37 * hash) + FILEBUFFERSIZE_FIELD_NUMBER; hash = (53 * hash) + getFileBufferSize(); } if (hasEncryptDataTransfer()) { hash = (37 * hash) + ENCRYPTDATATRANSFER_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getEncryptDataTransfer()); } if (hasTrashInterval()) { hash = (37 * hash) + TRASHINTERVAL_FIELD_NUMBER; hash = (53 * hash) + 
org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getTrashInterval()); } if (hasChecksumType()) { hash = (37 * hash) + CHECKSUMTYPE_FIELD_NUMBER; hash = (53 * hash) + checksumType_; } if (hasKeyProviderUri()) { hash = (37 * hash) + KEYPROVIDERURI_FIELD_NUMBER; hash = (53 * hash) + getKeyProviderUri().hashCode(); } if (hasPolicyId()) { hash = (37 * hash) + POLICYID_FIELD_NUMBER; hash = (53 * hash) + getPolicyId(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream 
input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * HDFS Server Defaults
     * 
* * Protobuf type {@code hadoop.hdfs.FsServerDefaultsProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.FsServerDefaultsProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); blockSize_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); bytesPerChecksum_ = 0; bitField0_ = (bitField0_ & ~0x00000002); writePacketSize_ = 0; bitField0_ = (bitField0_ & ~0x00000004); replication_ = 0; bitField0_ = (bitField0_ & ~0x00000008); fileBufferSize_ = 0; bitField0_ = (bitField0_ & ~0x00000010); encryptDataTransfer_ = false; bitField0_ = (bitField0_ & ~0x00000020); trashInterval_ = 0L; bitField0_ = (bitField0_ & ~0x00000040); checksumType_ = 1; 
bitField0_ = (bitField0_ & ~0x00000080); keyProviderUri_ = ""; bitField0_ = (bitField0_ & ~0x00000100); policyId_ = 0; bitField0_ = (bitField0_ & ~0x00000200); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.blockSize_ = blockSize_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.bytesPerChecksum_ = bytesPerChecksum_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.writePacketSize_ = writePacketSize_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.replication_ = replication_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.fileBufferSize_ = fileBufferSize_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.encryptDataTransfer_ = encryptDataTransfer_; to_bitField0_ |= 0x00000020; } if 
(((from_bitField0_ & 0x00000040) != 0)) { result.trashInterval_ = trashInterval_; to_bitField0_ |= 0x00000040; } if (((from_bitField0_ & 0x00000080) != 0)) { to_bitField0_ |= 0x00000080; } result.checksumType_ = checksumType_; if (((from_bitField0_ & 0x00000100) != 0)) { to_bitField0_ |= 0x00000100; } result.keyProviderUri_ = keyProviderUri_; if (((from_bitField0_ & 0x00000200) != 0)) { result.policyId_ = policyId_; to_bitField0_ |= 0x00000200; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance()) return this; if (other.hasBlockSize()) { setBlockSize(other.getBlockSize()); } if (other.hasBytesPerChecksum()) { setBytesPerChecksum(other.getBytesPerChecksum()); } if (other.hasWritePacketSize()) { setWritePacketSize(other.getWritePacketSize()); } if (other.hasReplication()) { setReplication(other.getReplication()); } if (other.hasFileBufferSize()) { setFileBufferSize(other.getFileBufferSize()); } if (other.hasEncryptDataTransfer()) { setEncryptDataTransfer(other.getEncryptDataTransfer()); } if (other.hasTrashInterval()) { setTrashInterval(other.getTrashInterval()); } if (other.hasChecksumType()) { setChecksumType(other.getChecksumType()); } if (other.hasKeyProviderUri()) { bitField0_ |= 0x00000100; keyProviderUri_ = other.keyProviderUri_; onChanged(); } if (other.hasPolicyId()) { setPolicyId(other.getPolicyId()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasBlockSize()) { return false; } if (!hasBytesPerChecksum()) { return false; } if (!hasWritePacketSize()) { return false; } if (!hasReplication()) { return false; } if (!hasFileBufferSize()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int 
bitField0_; private long blockSize_ ; /** * required uint64 blockSize = 1; */ public boolean hasBlockSize() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 blockSize = 1; */ public long getBlockSize() { return blockSize_; } /** * required uint64 blockSize = 1; */ public Builder setBlockSize(long value) { bitField0_ |= 0x00000001; blockSize_ = value; onChanged(); return this; } /** * required uint64 blockSize = 1; */ public Builder clearBlockSize() { bitField0_ = (bitField0_ & ~0x00000001); blockSize_ = 0L; onChanged(); return this; } private int bytesPerChecksum_ ; /** * required uint32 bytesPerChecksum = 2; */ public boolean hasBytesPerChecksum() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 bytesPerChecksum = 2; */ public int getBytesPerChecksum() { return bytesPerChecksum_; } /** * required uint32 bytesPerChecksum = 2; */ public Builder setBytesPerChecksum(int value) { bitField0_ |= 0x00000002; bytesPerChecksum_ = value; onChanged(); return this; } /** * required uint32 bytesPerChecksum = 2; */ public Builder clearBytesPerChecksum() { bitField0_ = (bitField0_ & ~0x00000002); bytesPerChecksum_ = 0; onChanged(); return this; } private int writePacketSize_ ; /** * required uint32 writePacketSize = 3; */ public boolean hasWritePacketSize() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint32 writePacketSize = 3; */ public int getWritePacketSize() { return writePacketSize_; } /** * required uint32 writePacketSize = 3; */ public Builder setWritePacketSize(int value) { bitField0_ |= 0x00000004; writePacketSize_ = value; onChanged(); return this; } /** * required uint32 writePacketSize = 3; */ public Builder clearWritePacketSize() { bitField0_ = (bitField0_ & ~0x00000004); writePacketSize_ = 0; onChanged(); return this; } private int replication_ ; /** *
       * Actually a short - only 16 bits used
       * 
* * required uint32 replication = 4; */ public boolean hasReplication() { return ((bitField0_ & 0x00000008) != 0); } /** *
       * Actually a short - only 16 bits used
       * 
* * required uint32 replication = 4; */ public int getReplication() { return replication_; } /** *
       * Actually a short - only 16 bits used
       * 
* * required uint32 replication = 4; */ public Builder setReplication(int value) { bitField0_ |= 0x00000008; replication_ = value; onChanged(); return this; } /** *
       * Actually a short - only 16 bits used
       * 
* * required uint32 replication = 4; */ public Builder clearReplication() { bitField0_ = (bitField0_ & ~0x00000008); replication_ = 0; onChanged(); return this; } private int fileBufferSize_ ; /** * required uint32 fileBufferSize = 5; */ public boolean hasFileBufferSize() { return ((bitField0_ & 0x00000010) != 0); } /** * required uint32 fileBufferSize = 5; */ public int getFileBufferSize() { return fileBufferSize_; } /** * required uint32 fileBufferSize = 5; */ public Builder setFileBufferSize(int value) { bitField0_ |= 0x00000010; fileBufferSize_ = value; onChanged(); return this; } /** * required uint32 fileBufferSize = 5; */ public Builder clearFileBufferSize() { bitField0_ = (bitField0_ & ~0x00000010); fileBufferSize_ = 0; onChanged(); return this; } private boolean encryptDataTransfer_ ; /** * optional bool encryptDataTransfer = 6 [default = false]; */ public boolean hasEncryptDataTransfer() { return ((bitField0_ & 0x00000020) != 0); } /** * optional bool encryptDataTransfer = 6 [default = false]; */ public boolean getEncryptDataTransfer() { return encryptDataTransfer_; } /** * optional bool encryptDataTransfer = 6 [default = false]; */ public Builder setEncryptDataTransfer(boolean value) { bitField0_ |= 0x00000020; encryptDataTransfer_ = value; onChanged(); return this; } /** * optional bool encryptDataTransfer = 6 [default = false]; */ public Builder clearEncryptDataTransfer() { bitField0_ = (bitField0_ & ~0x00000020); encryptDataTransfer_ = false; onChanged(); return this; } private long trashInterval_ ; /** * optional uint64 trashInterval = 7 [default = 0]; */ public boolean hasTrashInterval() { return ((bitField0_ & 0x00000040) != 0); } /** * optional uint64 trashInterval = 7 [default = 0]; */ public long getTrashInterval() { return trashInterval_; } /** * optional uint64 trashInterval = 7 [default = 0]; */ public Builder setTrashInterval(long value) { bitField0_ |= 0x00000040; trashInterval_ = value; onChanged(); return this; } /** * optional uint64 
trashInterval = 7 [default = 0]; */ public Builder clearTrashInterval() { bitField0_ = (bitField0_ & ~0x00000040); trashInterval_ = 0L; onChanged(); return this; } private int checksumType_ = 1; /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ public boolean hasChecksumType() { return ((bitField0_ & 0x00000080) != 0); } /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(checksumType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32 : result; } /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ public Builder setChecksumType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000080; checksumType_ = value.getNumber(); onChanged(); return this; } /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ public Builder clearChecksumType() { bitField0_ = (bitField0_ & ~0x00000080); checksumType_ = 1; onChanged(); return this; } private java.lang.Object keyProviderUri_ = ""; /** * optional string keyProviderUri = 9; */ public boolean hasKeyProviderUri() { return ((bitField0_ & 0x00000100) != 0); } /** * optional string keyProviderUri = 9; */ public java.lang.String getKeyProviderUri() { java.lang.Object ref = keyProviderUri_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyProviderUri_ = s; } return s; } else { 
return (java.lang.String) ref; } } /** * optional string keyProviderUri = 9; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKeyProviderUriBytes() { java.lang.Object ref = keyProviderUri_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyProviderUri_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string keyProviderUri = 9; */ public Builder setKeyProviderUri( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000100; keyProviderUri_ = value; onChanged(); return this; } /** * optional string keyProviderUri = 9; */ public Builder clearKeyProviderUri() { bitField0_ = (bitField0_ & ~0x00000100); keyProviderUri_ = getDefaultInstance().getKeyProviderUri(); onChanged(); return this; } /** * optional string keyProviderUri = 9; */ public Builder setKeyProviderUriBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000100; keyProviderUri_ = value; onChanged(); return this; } private int policyId_ ; /** * optional uint32 policyId = 10 [default = 0]; */ public boolean hasPolicyId() { return ((bitField0_ & 0x00000200) != 0); } /** * optional uint32 policyId = 10 [default = 0]; */ public int getPolicyId() { return policyId_; } /** * optional uint32 policyId = 10 [default = 0]; */ public Builder setPolicyId(int value) { bitField0_ |= 0x00000200; policyId_ = value; onChanged(); return this; } /** * optional uint32 policyId = 10 [default = 0]; */ public Builder clearPolicyId() { bitField0_ = (bitField0_ & ~0x00000200); policyId_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } 
@java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FsServerDefaultsProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.FsServerDefaultsProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public FsServerDefaultsProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new FsServerDefaultsProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DirectoryListingProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DirectoryListingProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ java.util.List getPartialListingList(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ int getPartialListingCount(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ java.util.List getPartialListingOrBuilderList(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index); /** * required uint32 remainingEntries = 2; */ boolean hasRemainingEntries(); /** * required uint32 remainingEntries = 2; */ int getRemainingEntries(); } /** *
   **
   * Directory listing
   * 
* * Protobuf type {@code hadoop.hdfs.DirectoryListingProto} */ public static final class DirectoryListingProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.DirectoryListingProto) DirectoryListingProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DirectoryListingProto.newBuilder() to construct. private DirectoryListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DirectoryListingProto() { partialListing_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DirectoryListingProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { partialListing_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } partialListing_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry)); break; } case 16: { bitField0_ |= 0x00000001; remainingEntries_ = input.readUInt32(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch 
(java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { partialListing_ = java.util.Collections.unmodifiableList(partialListing_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class); } private int bitField0_; public static final int PARTIALLISTING_FIELD_NUMBER = 1; private java.util.List partialListing_; /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingList() { return partialListing_; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingOrBuilderList() { return partialListing_; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public int getPartialListingCount() { return partialListing_.size(); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) { return partialListing_.get(index); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int 
index) { return partialListing_.get(index); } public static final int REMAININGENTRIES_FIELD_NUMBER = 2; private int remainingEntries_; /** * required uint32 remainingEntries = 2; */ public boolean hasRemainingEntries() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint32 remainingEntries = 2; */ public int getRemainingEntries() { return remainingEntries_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasRemainingEntries()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getPartialListingCount(); i++) { if (!getPartialListing(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < partialListing_.size(); i++) { output.writeMessage(1, partialListing_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(2, remainingEntries_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < partialListing_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, partialListing_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, remainingEntries_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)) { return super.equals(obj); } 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) obj; if (!getPartialListingList() .equals(other.getPartialListingList())) return false; if (hasRemainingEntries() != other.hasRemainingEntries()) return false; if (hasRemainingEntries()) { if (getRemainingEntries() != other.getRemainingEntries()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getPartialListingCount() > 0) { hash = (37 * hash) + PARTIALLISTING_FIELD_NUMBER; hash = (53 * hash) + getPartialListingList().hashCode(); } if (hasRemainingEntries()) { hash = (37 * hash) + REMAININGENTRIES_FIELD_NUMBER; hash = (53 * hash) + getRemainingEntries(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } 
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Directory listing
     * 
* * Protobuf type {@code hadoop.hdfs.DirectoryListingProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DirectoryListingProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getPartialListingFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (partialListingBuilder_ == null) { partialListing_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { partialListingBuilder_.clear(); } remainingEntries_ = 0; bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (partialListingBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { partialListing_ = java.util.Collections.unmodifiableList(partialListing_); bitField0_ = (bitField0_ & ~0x00000001); } result.partialListing_ = partialListing_; } else { result.partialListing_ = partialListingBuilder_.build(); } if (((from_bitField0_ & 0x00000002) != 0)) { result.remainingEntries_ = remainingEntries_; to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { 
return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance()) return this; if (partialListingBuilder_ == null) { if (!other.partialListing_.isEmpty()) { if (partialListing_.isEmpty()) { partialListing_ = other.partialListing_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensurePartialListingIsMutable(); partialListing_.addAll(other.partialListing_); } onChanged(); } } else { if (!other.partialListing_.isEmpty()) { if (partialListingBuilder_.isEmpty()) { partialListingBuilder_.dispose(); partialListingBuilder_ = null; partialListing_ = other.partialListing_; bitField0_ = (bitField0_ & ~0x00000001); partialListingBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getPartialListingFieldBuilder() : null; } else { partialListingBuilder_.addAllMessages(other.partialListing_); } } } if (other.hasRemainingEntries()) { setRemainingEntries(other.getRemainingEntries()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasRemainingEntries()) { return false; } for (int i = 0; i < getPartialListingCount(); i++) { if (!getPartialListing(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.util.List partialListing_ = java.util.Collections.emptyList(); private void ensurePartialListingIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { partialListing_ = new java.util.ArrayList(partialListing_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> partialListingBuilder_; /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingList() { if (partialListingBuilder_ == null) { return 
java.util.Collections.unmodifiableList(partialListing_); } else { return partialListingBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public int getPartialListingCount() { if (partialListingBuilder_ == null) { return partialListing_.size(); } else { return partialListingBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) { if (partialListingBuilder_ == null) { return partialListing_.get(index); } else { return partialListingBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder setPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.set(index, value); onChanged(); } else { partialListingBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder setPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.set(index, builderForValue.build()); onChanged(); } else { partialListingBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.add(value); onChanged(); } else { partialListingBuilder_.addMessage(value); } return this; } /** * repeated 
.hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.add(index, value); onChanged(); } else { partialListingBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.add(builderForValue.build()); onChanged(); } else { partialListingBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.add(index, builderForValue.build()); onChanged(); } else { partialListingBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addAllPartialListing( java.lang.Iterable values) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, partialListing_); onChanged(); } else { partialListingBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder clearPartialListing() { if (partialListingBuilder_ == null) { partialListing_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { partialListingBuilder_.clear(); } return this; } /** 
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder removePartialListing(int index) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.remove(index); onChanged(); } else { partialListingBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getPartialListingBuilder( int index) { return getPartialListingFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index) { if (partialListingBuilder_ == null) { return partialListing_.get(index); } else { return partialListingBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingOrBuilderList() { if (partialListingBuilder_ != null) { return partialListingBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(partialListing_); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder() { return getPartialListingFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder( int index) { return getPartialListingFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingBuilderList() { return 
getPartialListingFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getPartialListingFieldBuilder() { if (partialListingBuilder_ == null) { partialListingBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( partialListing_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); partialListing_ = null; } return partialListingBuilder_; } private int remainingEntries_ ; /** * required uint32 remainingEntries = 2; */ public boolean hasRemainingEntries() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 remainingEntries = 2; */ public int getRemainingEntries() { return remainingEntries_; } /** * required uint32 remainingEntries = 2; */ public Builder setRemainingEntries(int value) { bitField0_ |= 0x00000002; remainingEntries_ = value; onChanged(); return this; } /** * required uint32 remainingEntries = 2; */ public Builder clearRemainingEntries() { bitField0_ = (bitField0_ & ~0x00000002); remainingEntries_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DirectoryListingProto) } // 
@@protoc_insertion_point(class_scope:hadoop.hdfs.DirectoryListingProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DirectoryListingProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new DirectoryListingProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface RemoteExceptionProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.RemoteExceptionProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string className = 1; */ boolean hasClassName(); /** * required string className = 1; */ java.lang.String getClassName(); /** * required string className = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getClassNameBytes(); /** * optional string message = 2; */ boolean hasMessage(); /** * optional string message = 2; */ java.lang.String getMessage(); /** * optional string message = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getMessageBytes(); } /** * Protobuf type {@code 
hadoop.hdfs.RemoteExceptionProto} */ public static final class RemoteExceptionProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.RemoteExceptionProto) RemoteExceptionProtoOrBuilder { private static final long serialVersionUID = 0L; // Use RemoteExceptionProto.newBuilder() to construct. private RemoteExceptionProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private RemoteExceptionProto() { className_ = ""; message_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RemoteExceptionProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; className_ = bs; break; } case 18: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; message_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { 
this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder.class); } private int bitField0_; public static final int CLASSNAME_FIELD_NUMBER = 1; private volatile java.lang.Object className_; /** * required string className = 1; */ public boolean hasClassName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string className = 1; */ public java.lang.String getClassName() { java.lang.Object ref = className_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { className_ = s; } return s; } } /** * required string className = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getClassNameBytes() { java.lang.Object ref = className_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); className_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int MESSAGE_FIELD_NUMBER = 2; private volatile java.lang.Object message_; /** * optional string message = 2; */ public boolean hasMessage() { return 
((bitField0_ & 0x00000002) != 0); } /** * optional string message = 2; */ public java.lang.String getMessage() { java.lang.Object ref = message_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { message_ = s; } return s; } } /** * optional string message = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getMessageBytes() { java.lang.Object ref = message_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); message_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasClassName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, className_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, message_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, className_); } if (((bitField0_ & 0x00000002) != 0)) { size += 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, message_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto) obj; if (hasClassName() != other.hasClassName()) return false; if (hasClassName()) { if (!getClassName() .equals(other.getClassName())) return false; } if (hasMessage() != other.hasMessage()) return false; if (hasMessage()) { if (!getMessage() .equals(other.getMessage())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasClassName()) { hash = (37 * hash) + CLASSNAME_FIELD_NUMBER; hash = (53 * hash) + getClassName().hashCode(); } if (hasMessage()) { hash = (37 * hash) + MESSAGE_FIELD_NUMBER; hash = (53 * hash) + getMessage().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
.parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RemoteExceptionProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.RemoteExceptionProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); className_ = ""; bitField0_ = (bitField0_ & ~0x00000001); message_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor 
getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.className_ = className_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.message_ = message_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object 
value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance()) return this; if (other.hasClassName()) { bitField0_ |= 0x00000001; className_ = other.className_; onChanged(); } if (other.hasMessage()) { bitField0_ |= 0x00000002; message_ = other.message_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasClassName()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object className_ = ""; /** * required string className = 
1; */ public boolean hasClassName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string className = 1; */ public java.lang.String getClassName() { java.lang.Object ref = className_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { className_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string className = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getClassNameBytes() { java.lang.Object ref = className_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); className_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string className = 1; */ public Builder setClassName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; className_ = value; onChanged(); return this; } /** * required string className = 1; */ public Builder clearClassName() { bitField0_ = (bitField0_ & ~0x00000001); className_ = getDefaultInstance().getClassName(); onChanged(); return this; } /** * required string className = 1; */ public Builder setClassNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; className_ = value; onChanged(); return this; } private java.lang.Object message_ = ""; /** * optional string message = 2; */ public boolean hasMessage() { return ((bitField0_ & 0x00000002) != 0); } /** * optional string message = 2; */ public java.lang.String getMessage() { java.lang.Object ref = message_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) 
ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { message_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string message = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getMessageBytes() { java.lang.Object ref = message_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); message_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string message = 2; */ public Builder setMessage( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; message_ = value; onChanged(); return this; } /** * optional string message = 2; */ public Builder clearMessage() { bitField0_ = (bitField0_ & ~0x00000002); message_ = getDefaultInstance().getMessage(); onChanged(); return this; } /** * optional string message = 2; */ public Builder setMessageBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; message_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RemoteExceptionProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoteExceptionProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto(); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public RemoteExceptionProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new RemoteExceptionProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface BatchedDirectoryListingProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BatchedDirectoryListingProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ java.util.List getPartialListingList(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ int getPartialListingCount(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ java.util.List getPartialListingOrBuilderList(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index); /** * required uint32 parentIdx = 2; */ boolean hasParentIdx(); /** * required uint32 parentIdx = 2; */ int 
getParentIdx(); /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ boolean hasException(); /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getException(); /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder getExceptionOrBuilder(); } /** *
   * Directory listing result for a batched listing call.
   * 
* * Protobuf type {@code hadoop.hdfs.BatchedDirectoryListingProto} */ public static final class BatchedDirectoryListingProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.BatchedDirectoryListingProto) BatchedDirectoryListingProtoOrBuilder { private static final long serialVersionUID = 0L; // Use BatchedDirectoryListingProto.newBuilder() to construct. private BatchedDirectoryListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private BatchedDirectoryListingProto() { partialListing_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private BatchedDirectoryListingProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { partialListing_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } partialListing_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry)); break; } case 16: { bitField0_ |= 0x00000001; parentIdx_ = input.readUInt32(); break; } case 26: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) != 0)) { subBuilder = exception_.toBuilder(); } 
exception_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(exception_); exception_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { partialListing_ = java.util.Collections.unmodifiableList(partialListing_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.Builder.class); } private int bitField0_; public static final int PARTIALLISTING_FIELD_NUMBER = 1; private java.util.List partialListing_; /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingList() { return partialListing_; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingOrBuilderList() { return partialListing_; } /** * 
repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public int getPartialListingCount() { return partialListing_.size(); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) { return partialListing_.get(index); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index) { return partialListing_.get(index); } public static final int PARENTIDX_FIELD_NUMBER = 2; private int parentIdx_; /** * required uint32 parentIdx = 2; */ public boolean hasParentIdx() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint32 parentIdx = 2; */ public int getParentIdx() { return parentIdx_; } public static final int EXCEPTION_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto exception_; /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public boolean hasException() { return ((bitField0_ & 0x00000002) != 0); } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getException() { return exception_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance() : exception_; } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder getExceptionOrBuilder() { return exception_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance() : exception_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasParentIdx()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getPartialListingCount(); i++) { if (!getPartialListing(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasException()) { if (!getException().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < partialListing_.size(); i++) { output.writeMessage(1, partialListing_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(2, parentIdx_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(3, getException()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < partialListing_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, partialListing_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, parentIdx_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(3, getException()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto)) { return super.equals(obj); } 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto) obj; if (!getPartialListingList() .equals(other.getPartialListingList())) return false; if (hasParentIdx() != other.hasParentIdx()) return false; if (hasParentIdx()) { if (getParentIdx() != other.getParentIdx()) return false; } if (hasException() != other.hasException()) return false; if (hasException()) { if (!getException() .equals(other.getException())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getPartialListingCount() > 0) { hash = (37 * hash) + PARTIALLISTING_FIELD_NUMBER; hash = (53 * hash) + getPartialListingList().hashCode(); } if (hasParentIdx()) { hash = (37 * hash) + PARENTIDX_FIELD_NUMBER; hash = (53 * hash) + getParentIdx(); } if (hasException()) { hash = (37 * hash) + EXCEPTION_FIELD_NUMBER; hash = (53 * hash) + getException().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     * Directory listing result for a batched listing call.
     * 
* * Protobuf type {@code hadoop.hdfs.BatchedDirectoryListingProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BatchedDirectoryListingProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getPartialListingFieldBuilder(); getExceptionFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (partialListingBuilder_ == null) { partialListing_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { partialListingBuilder_.clear(); } parentIdx_ = 0; bitField0_ = (bitField0_ & ~0x00000002); if (exceptionBuilder_ == null) { exception_ = null; } else { exceptionBuilder_.clear(); } bitField0_ = 
(bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (partialListingBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { partialListing_ = java.util.Collections.unmodifiableList(partialListing_); bitField0_ = (bitField0_ & ~0x00000001); } result.partialListing_ = partialListing_; } else { result.partialListing_ = partialListingBuilder_.build(); } if (((from_bitField0_ & 0x00000002) != 0)) { result.parentIdx_ = parentIdx_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000004) != 0)) { if (exceptionBuilder_ == null) { result.exception_ = exception_; } else { result.exception_ = exceptionBuilder_.build(); } to_bitField0_ |= 0x00000002; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.getDefaultInstance()) return this; if (partialListingBuilder_ == null) { if (!other.partialListing_.isEmpty()) { if (partialListing_.isEmpty()) { partialListing_ = other.partialListing_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensurePartialListingIsMutable(); partialListing_.addAll(other.partialListing_); } onChanged(); } } else { if (!other.partialListing_.isEmpty()) { if (partialListingBuilder_.isEmpty()) { partialListingBuilder_.dispose(); partialListingBuilder_ = null; partialListing_ = other.partialListing_; bitField0_ = (bitField0_ & ~0x00000001); 
partialListingBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getPartialListingFieldBuilder() : null; } else { partialListingBuilder_.addAllMessages(other.partialListing_); } } } if (other.hasParentIdx()) { setParentIdx(other.getParentIdx()); } if (other.hasException()) { mergeException(other.getException()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasParentIdx()) { return false; } for (int i = 0; i < getPartialListingCount(); i++) { if (!getPartialListing(i).isInitialized()) { return false; } } if (hasException()) { if (!getException().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.util.List partialListing_ = java.util.Collections.emptyList(); private void ensurePartialListingIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { partialListing_ = new java.util.ArrayList(partialListing_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> partialListingBuilder_; /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingList() { if (partialListingBuilder_ == null) { return java.util.Collections.unmodifiableList(partialListing_); } else { return partialListingBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public int getPartialListingCount() { if (partialListingBuilder_ == null) { return partialListing_.size(); } else { return partialListingBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) { if (partialListingBuilder_ == null) { return partialListing_.get(index); } else { return partialListingBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder setPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.set(index, value); onChanged(); } else { partialListingBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder setPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.set(index, builderForValue.build()); onChanged(); } else { partialListingBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { 
if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.add(value); onChanged(); } else { partialListingBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.add(index, value); onChanged(); } else { partialListingBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.add(builderForValue.build()); onChanged(); } else { partialListingBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.add(index, builderForValue.build()); onChanged(); } else { partialListingBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addAllPartialListing( java.lang.Iterable values) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, partialListing_); onChanged(); } else { partialListingBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 
1; */ public Builder clearPartialListing() { if (partialListingBuilder_ == null) { partialListing_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { partialListingBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder removePartialListing(int index) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.remove(index); onChanged(); } else { partialListingBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getPartialListingBuilder( int index) { return getPartialListingFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index) { if (partialListingBuilder_ == null) { return partialListing_.get(index); } else { return partialListingBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingOrBuilderList() { if (partialListingBuilder_ != null) { return partialListingBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(partialListing_); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder() { return getPartialListingFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder( int index) { return 
getPartialListingFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingBuilderList() { return getPartialListingFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getPartialListingFieldBuilder() { if (partialListingBuilder_ == null) { partialListingBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( partialListing_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); partialListing_ = null; } return partialListingBuilder_; } private int parentIdx_ ; /** * required uint32 parentIdx = 2; */ public boolean hasParentIdx() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 parentIdx = 2; */ public int getParentIdx() { return parentIdx_; } /** * required uint32 parentIdx = 2; */ public Builder setParentIdx(int value) { bitField0_ |= 0x00000002; parentIdx_ = value; onChanged(); return this; } /** * required uint32 parentIdx = 2; */ public Builder clearParentIdx() { bitField0_ = (bitField0_ & ~0x00000002); parentIdx_ = 0; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto exception_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder> exceptionBuilder_; /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public boolean hasException() { return ((bitField0_ & 0x00000004) != 0); } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getException() { if (exceptionBuilder_ == null) { return exception_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance() : exception_; } else { return exceptionBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public Builder setException(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto value) { if (exceptionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } exception_ = value; onChanged(); } else { exceptionBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public Builder setException( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder builderForValue) { if (exceptionBuilder_ == null) { exception_ = builderForValue.build(); onChanged(); } else { exceptionBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public Builder mergeException(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto value) { if (exceptionBuilder_ == null) { if (((bitField0_ & 0x00000004) != 0) && exception_ != null && exception_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance()) { exception_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.newBuilder(exception_).mergeFrom(value).buildPartial(); } else { 
exception_ = value; } onChanged(); } else { exceptionBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public Builder clearException() { if (exceptionBuilder_ == null) { exception_ = null; onChanged(); } else { exceptionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder getExceptionBuilder() { bitField0_ |= 0x00000004; onChanged(); return getExceptionFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder getExceptionOrBuilder() { if (exceptionBuilder_ != null) { return exceptionBuilder_.getMessageOrBuilder(); } else { return exception_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance() : exception_; } } /** * optional .hadoop.hdfs.RemoteExceptionProto exception = 3; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder> getExceptionFieldBuilder() { if (exceptionBuilder_ == null) { exceptionBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder>( getException(), getParentForChildren(), isClean()); exception_ = null; } return exceptionBuilder_; } @java.lang.Override public final Builder setUnknownFields( final 
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BatchedDirectoryListingProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BatchedDirectoryListingProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public BatchedDirectoryListingProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new BatchedDirectoryListingProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshottableDirectoryStatusProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshottableDirectoryStatusProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.HdfsFileStatusProto 
     dirStatus = 1;
     */
    boolean hasDirStatus();
    /**
     * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus();
    /**
     * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder();

    /**
     * <pre>
     * Fields specific for snapshottable directory
     * </pre>
     *
     * required uint32 snapshot_quota = 2;
     */
    boolean hasSnapshotQuota();
    /**
     * <pre>
     * Fields specific for snapshottable directory
     * </pre>
     *
     * required uint32 snapshot_quota = 2;
     */
    int getSnapshotQuota();
    /**
     * required uint32 snapshot_number = 3;
     */
    boolean hasSnapshotNumber();
    /**
     * required uint32 snapshot_number = 3;
     */
    int getSnapshotNumber();
    /**
     * required bytes parent_fullpath = 4;
     */
    boolean hasParentFullpath();
    /**
     * required bytes parent_fullpath = 4;
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath();
  }
  /**
   *
   **
   * Status of a snapshottable directory: besides the normal information for 
   * a directory status, also include snapshot quota, number of snapshots, and
   * the full path of the parent directory. 
   * 
* * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryStatusProto} */ public static final class SnapshottableDirectoryStatusProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshottableDirectoryStatusProto) SnapshottableDirectoryStatusProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshottableDirectoryStatusProto.newBuilder() to construct. private SnapshottableDirectoryStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshottableDirectoryStatusProto() { parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshottableDirectoryStatusProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) != 0)) { subBuilder = dirStatus_.toBuilder(); } dirStatus_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(dirStatus_); dirStatus_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; 
snapshotQuota_ = input.readUInt32(); break; } case 24: { bitField0_ |= 0x00000004; snapshotNumber_ = input.readUInt32(); break; } case 34: { bitField0_ |= 0x00000008; parentFullpath_ = input.readBytes(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder.class); } private int bitField0_; public static final int DIRSTATUS_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_; /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public boolean hasDirStatus() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() { return dirStatus_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() { return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_; } public static final int SNAPSHOT_QUOTA_FIELD_NUMBER = 2; private int snapshotQuota_; /** *
     * Fields specific for snapshottable directory
     * 
* * required uint32 snapshot_quota = 2; */ public boolean hasSnapshotQuota() { return ((bitField0_ & 0x00000002) != 0); } /** *
     * Fields specific for snapshottable directory
     * 
* * required uint32 snapshot_quota = 2; */ public int getSnapshotQuota() { return snapshotQuota_; } public static final int SNAPSHOT_NUMBER_FIELD_NUMBER = 3; private int snapshotNumber_; /** * required uint32 snapshot_number = 3; */ public boolean hasSnapshotNumber() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint32 snapshot_number = 3; */ public int getSnapshotNumber() { return snapshotNumber_; } public static final int PARENT_FULLPATH_FIELD_NUMBER = 4; private org.apache.hadoop.thirdparty.protobuf.ByteString parentFullpath_; /** * required bytes parent_fullpath = 4; */ public boolean hasParentFullpath() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes parent_fullpath = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath() { return parentFullpath_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasDirStatus()) { memoizedIsInitialized = 0; return false; } if (!hasSnapshotQuota()) { memoizedIsInitialized = 0; return false; } if (!hasSnapshotNumber()) { memoizedIsInitialized = 0; return false; } if (!hasParentFullpath()) { memoizedIsInitialized = 0; return false; } if (!getDirStatus().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getDirStatus()); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, snapshotQuota_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt32(3, snapshotNumber_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBytes(4, parentFullpath_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size 
= memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getDirStatus()); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, snapshotQuota_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(3, snapshotNumber_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(4, parentFullpath_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto) obj; if (hasDirStatus() != other.hasDirStatus()) return false; if (hasDirStatus()) { if (!getDirStatus() .equals(other.getDirStatus())) return false; } if (hasSnapshotQuota() != other.hasSnapshotQuota()) return false; if (hasSnapshotQuota()) { if (getSnapshotQuota() != other.getSnapshotQuota()) return false; } if (hasSnapshotNumber() != other.hasSnapshotNumber()) return false; if (hasSnapshotNumber()) { if (getSnapshotNumber() != other.getSnapshotNumber()) return false; } if (hasParentFullpath() != other.hasParentFullpath()) return false; if (hasParentFullpath()) { if (!getParentFullpath() .equals(other.getParentFullpath())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + 
getDescriptor().hashCode(); if (hasDirStatus()) { hash = (37 * hash) + DIRSTATUS_FIELD_NUMBER; hash = (53 * hash) + getDirStatus().hashCode(); } if (hasSnapshotQuota()) { hash = (37 * hash) + SNAPSHOT_QUOTA_FIELD_NUMBER; hash = (53 * hash) + getSnapshotQuota(); } if (hasSnapshotNumber()) { hash = (37 * hash) + SNAPSHOT_NUMBER_FIELD_NUMBER; hash = (53 * hash) + getSnapshotNumber(); } if (hasParentFullpath()) { hash = (37 * hash) + PARENT_FULLPATH_FIELD_NUMBER; hash = (53 * hash) + getParentFullpath().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(byte[] data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
.parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Status of a snapshottable directory: besides the normal information for 
     * a directory status, also include snapshot quota, number of snapshots, and
     * the full path of the parent directory. 
     * 
* * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryStatusProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshottableDirectoryStatusProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getDirStatusFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (dirStatusBuilder_ == null) { dirStatus_ = null; } else { dirStatusBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); snapshotQuota_ = 0; bitField0_ = (bitField0_ & ~0x00000002); snapshotNumber_ = 0; bitField0_ = (bitField0_ & ~0x00000004); parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; 
bitField0_ = (bitField0_ & ~0x00000008); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { if (dirStatusBuilder_ == null) { result.dirStatus_ = dirStatus_; } else { result.dirStatus_ = dirStatusBuilder_.build(); } to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.snapshotQuota_ = snapshotQuota_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.snapshotNumber_ = snapshotNumber_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { to_bitField0_ |= 0x00000008; } result.parentFullpath_ = parentFullpath_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance()) return this; if (other.hasDirStatus()) { mergeDirStatus(other.getDirStatus()); } if (other.hasSnapshotQuota()) { setSnapshotQuota(other.getSnapshotQuota()); } if (other.hasSnapshotNumber()) { setSnapshotNumber(other.getSnapshotNumber()); } if (other.hasParentFullpath()) { setParentFullpath(other.getParentFullpath()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasDirStatus()) { return false; } if (!hasSnapshotQuota()) { return 
false; } if (!hasSnapshotNumber()) { return false; } if (!hasParentFullpath()) { return false; } if (!getDirStatus().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> dirStatusBuilder_; /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public boolean hasDirStatus() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() { if (dirStatusBuilder_ == null) { return dirStatus_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_; } else { return dirStatusBuilder_.getMessage(); } } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder setDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (dirStatusBuilder_ == null) { if (value == null) { throw new NullPointerException(); } dirStatus_ = value; onChanged(); } else { dirStatusBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder setDirStatus( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (dirStatusBuilder_ == null) { dirStatus_ = builderForValue.build(); onChanged(); } else { dirStatusBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder mergeDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (dirStatusBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && dirStatus_ != null && dirStatus_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) { dirStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder(dirStatus_).mergeFrom(value).buildPartial(); } else { dirStatus_ = value; } onChanged(); } else { dirStatusBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder clearDirStatus() { if (dirStatusBuilder_ == null) { dirStatus_ = null; onChanged(); } else { dirStatusBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getDirStatusBuilder() 
{ bitField0_ |= 0x00000001; onChanged(); return getDirStatusFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() { if (dirStatusBuilder_ != null) { return dirStatusBuilder_.getMessageOrBuilder(); } else { return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_; } } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getDirStatusFieldBuilder() { if (dirStatusBuilder_ == null) { dirStatusBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( getDirStatus(), getParentForChildren(), isClean()); dirStatus_ = null; } return dirStatusBuilder_; } private int snapshotQuota_ ; /** *
       * Fields specific for snapshottable directory
       * 
* * required uint32 snapshot_quota = 2; */ public boolean hasSnapshotQuota() { return ((bitField0_ & 0x00000002) != 0); } /** *
       * Fields specific for snapshottable directory
       * 
* * required uint32 snapshot_quota = 2; */ public int getSnapshotQuota() { return snapshotQuota_; } /** *
       * Fields specific for snapshottable directory
       * 
* * required uint32 snapshot_quota = 2; */ public Builder setSnapshotQuota(int value) { bitField0_ |= 0x00000002; snapshotQuota_ = value; onChanged(); return this; } /** *
       * Fields specific for snapshottable directory
       * 
* * required uint32 snapshot_quota = 2; */ public Builder clearSnapshotQuota() { bitField0_ = (bitField0_ & ~0x00000002); snapshotQuota_ = 0; onChanged(); return this; } private int snapshotNumber_ ; /** * required uint32 snapshot_number = 3; */ public boolean hasSnapshotNumber() { return ((bitField0_ & 0x00000004) != 0); } /** * required uint32 snapshot_number = 3; */ public int getSnapshotNumber() { return snapshotNumber_; } /** * required uint32 snapshot_number = 3; */ public Builder setSnapshotNumber(int value) { bitField0_ |= 0x00000004; snapshotNumber_ = value; onChanged(); return this; } /** * required uint32 snapshot_number = 3; */ public Builder clearSnapshotNumber() { bitField0_ = (bitField0_ & ~0x00000004); snapshotNumber_ = 0; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes parent_fullpath = 4; */ public boolean hasParentFullpath() { return ((bitField0_ & 0x00000008) != 0); } /** * required bytes parent_fullpath = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath() { return parentFullpath_; } /** * required bytes parent_fullpath = 4; */ public Builder setParentFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; parentFullpath_ = value; onChanged(); return this; } /** * required bytes parent_fullpath = 4; */ public Builder clearParentFullpath() { bitField0_ = (bitField0_ & ~0x00000008); parentFullpath_ = getDefaultInstance().getParentFullpath(); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return 
super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshottableDirectoryStatusProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshottableDirectoryStatusProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshottableDirectoryStatusProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new SnapshottableDirectoryStatusProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshottableDirectoryListingProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshottableDirectoryListingProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ java.util.List getSnapshottableDirListingList(); /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index); /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ int getSnapshottableDirListingCount(); /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ java.util.List getSnapshottableDirListingOrBuilderList(); /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder( int index); } /** *
   **
   * Snapshottable directory listing
   * 
* * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryListingProto} */ public static final class SnapshottableDirectoryListingProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshottableDirectoryListingProto) SnapshottableDirectoryListingProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshottableDirectoryListingProto.newBuilder() to construct. private SnapshottableDirectoryListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshottableDirectoryListingProto() { snapshottableDirListing_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshottableDirectoryListingProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { snapshottableDirListing_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } snapshottableDirListing_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.PARSER, extensionRegistry)); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch 
(org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { snapshottableDirListing_ = java.util.Collections.unmodifiableList(snapshottableDirListing_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder.class); } public static final int SNAPSHOTTABLEDIRLISTING_FIELD_NUMBER = 1; private java.util.List snapshottableDirListing_; /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public java.util.List getSnapshottableDirListingList() { return snapshottableDirListing_; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public java.util.List getSnapshottableDirListingOrBuilderList() { return snapshottableDirListing_; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public int getSnapshottableDirListingCount() { return snapshottableDirListing_.size(); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ 
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index) { return snapshottableDirListing_.get(index); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder( int index) { return snapshottableDirListing_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; for (int i = 0; i < getSnapshottableDirListingCount(); i++) { if (!getSnapshottableDirListing(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < snapshottableDirListing_.size(); i++) { output.writeMessage(1, snapshottableDirListing_.get(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < snapshottableDirListing_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, snapshottableDirListing_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto) obj; if 
(!getSnapshottableDirListingList() .equals(other.getSnapshottableDirListingList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getSnapshottableDirListingCount() > 0) { hash = (37 * hash) + SNAPSHOTTABLEDIRLISTING_FIELD_NUMBER; hash = (53 * hash) + getSnapshottableDirListingList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(byte[] data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Snapshottable directory listing
     * 
* * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryListingProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshottableDirectoryListingProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getSnapshottableDirListingFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (snapshottableDirListingBuilder_ == null) { snapshottableDirListing_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { snapshottableDirListingBuilder_.clear(); } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor 
getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto(this); int from_bitField0_ = bitField0_; if (snapshottableDirListingBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { snapshottableDirListing_ = java.util.Collections.unmodifiableList(snapshottableDirListing_); bitField0_ = (bitField0_ & ~0x00000001); } result.snapshottableDirListing_ = snapshottableDirListing_; } else { result.snapshottableDirListing_ = snapshottableDirListingBuilder_.build(); } onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance()) return this; if (snapshottableDirListingBuilder_ == null) { if (!other.snapshottableDirListing_.isEmpty()) { if (snapshottableDirListing_.isEmpty()) { snapshottableDirListing_ = other.snapshottableDirListing_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.addAll(other.snapshottableDirListing_); } onChanged(); } } else { if (!other.snapshottableDirListing_.isEmpty()) { if (snapshottableDirListingBuilder_.isEmpty()) { snapshottableDirListingBuilder_.dispose(); snapshottableDirListingBuilder_ = null; snapshottableDirListing_ = other.snapshottableDirListing_; bitField0_ = (bitField0_ & ~0x00000001); snapshottableDirListingBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getSnapshottableDirListingFieldBuilder() : null; } else { snapshottableDirListingBuilder_.addAllMessages(other.snapshottableDirListing_); } } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { for (int i = 0; i < getSnapshottableDirListingCount(); i++) { if (!getSnapshottableDirListing(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.util.List snapshottableDirListing_ = java.util.Collections.emptyList(); private void ensureSnapshottableDirListingIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { snapshottableDirListing_ = new java.util.ArrayList(snapshottableDirListing_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder> snapshottableDirListingBuilder_; /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public java.util.List getSnapshottableDirListingList() { if 
(snapshottableDirListingBuilder_ == null) { return java.util.Collections.unmodifiableList(snapshottableDirListing_); } else { return snapshottableDirListingBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public int getSnapshottableDirListingCount() { if (snapshottableDirListingBuilder_ == null) { return snapshottableDirListing_.size(); } else { return snapshottableDirListingBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index) { if (snapshottableDirListingBuilder_ == null) { return snapshottableDirListing_.get(index); } else { return snapshottableDirListingBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder setSnapshottableDirListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) { if (snapshottableDirListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.set(index, value); onChanged(); } else { snapshottableDirListingBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder setSnapshottableDirListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.set(index, builderForValue.build()); onChanged(); } else { snapshottableDirListingBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto 
snapshottableDirListing = 1; */ public Builder addSnapshottableDirListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) { if (snapshottableDirListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.add(value); onChanged(); } else { snapshottableDirListingBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addSnapshottableDirListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) { if (snapshottableDirListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.add(index, value); onChanged(); } else { snapshottableDirListingBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addSnapshottableDirListing( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.add(builderForValue.build()); onChanged(); } else { snapshottableDirListingBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addSnapshottableDirListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.add(index, builderForValue.build()); onChanged(); } else { snapshottableDirListingBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * 
repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addAllSnapshottableDirListing( java.lang.Iterable values) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, snapshottableDirListing_); onChanged(); } else { snapshottableDirListingBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder clearSnapshottableDirListing() { if (snapshottableDirListingBuilder_ == null) { snapshottableDirListing_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { snapshottableDirListingBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder removeSnapshottableDirListing(int index) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.remove(index); onChanged(); } else { snapshottableDirListingBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder getSnapshottableDirListingBuilder( int index) { return getSnapshottableDirListingFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder( int index) { if (snapshottableDirListingBuilder_ == null) { return snapshottableDirListing_.get(index); } else { return snapshottableDirListingBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto 
snapshottableDirListing = 1; */ public java.util.List getSnapshottableDirListingOrBuilderList() { if (snapshottableDirListingBuilder_ != null) { return snapshottableDirListingBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(snapshottableDirListing_); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder addSnapshottableDirListingBuilder() { return getSnapshottableDirListingFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder addSnapshottableDirListingBuilder( int index) { return getSnapshottableDirListingFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public java.util.List getSnapshottableDirListingBuilderList() { return getSnapshottableDirListingFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder> getSnapshottableDirListingFieldBuilder() { if (snapshottableDirListingBuilder_ == null) { snapshottableDirListingBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder>( snapshottableDirListing_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); snapshottableDirListing_ = null; } return snapshottableDirListingBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshottableDirectoryListingProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshottableDirectoryListingProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshottableDirectoryListingProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new SnapshottableDirectoryListingProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser 
getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotDiffReportEntryProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportEntryProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required bytes fullpath = 1; */ boolean hasFullpath(); /** * required bytes fullpath = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath(); /** * required string modificationLabel = 2; */ boolean hasModificationLabel(); /** * required string modificationLabel = 2; */ java.lang.String getModificationLabel(); /** * required string modificationLabel = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getModificationLabelBytes(); /** * optional bytes targetPath = 3; */ boolean hasTargetPath(); /** * optional bytes targetPath = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath(); } /** *
   **
   * Snapshot diff report entry
   * 
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportEntryProto} */ public static final class SnapshotDiffReportEntryProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportEntryProto) SnapshotDiffReportEntryProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotDiffReportEntryProto.newBuilder() to construct. private SnapshotDiffReportEntryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotDiffReportEntryProto() { fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; modificationLabel_ = ""; targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshotDiffReportEntryProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { bitField0_ |= 0x00000001; fullpath_ = input.readBytes(); break; } case 18: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; modificationLabel_ = bs; break; } case 26: { bitField0_ |= 0x00000004; targetPath_ = input.readBytes(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch 
(org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder.class); } private int bitField0_; public static final int FULLPATH_FIELD_NUMBER = 1; private org.apache.hadoop.thirdparty.protobuf.ByteString fullpath_; /** * required bytes fullpath = 1; */ public boolean hasFullpath() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes fullpath = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath() { return fullpath_; } public static final int MODIFICATIONLABEL_FIELD_NUMBER = 2; private volatile java.lang.Object modificationLabel_; /** * required string modificationLabel = 2; */ public boolean hasModificationLabel() { return ((bitField0_ & 0x00000002) != 0); } /** * required string modificationLabel = 2; */ public java.lang.String getModificationLabel() { java.lang.Object ref = modificationLabel_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { modificationLabel_ = s; } return s; } } /** * required string modificationLabel = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getModificationLabelBytes() { java.lang.Object ref = modificationLabel_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); modificationLabel_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int TARGETPATH_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString targetPath_; /** * optional bytes targetPath = 3; */ public boolean hasTargetPath() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bytes targetPath = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath() { return targetPath_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasFullpath()) { memoizedIsInitialized = 0; return false; } if (!hasModificationLabel()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeBytes(1, fullpath_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, modificationLabel_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBytes(3, targetPath_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 
0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(1, fullpath_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, modificationLabel_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, targetPath_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto) obj; if (hasFullpath() != other.hasFullpath()) return false; if (hasFullpath()) { if (!getFullpath() .equals(other.getFullpath())) return false; } if (hasModificationLabel() != other.hasModificationLabel()) return false; if (hasModificationLabel()) { if (!getModificationLabel() .equals(other.getModificationLabel())) return false; } if (hasTargetPath() != other.hasTargetPath()) return false; if (hasTargetPath()) { if (!getTargetPath() .equals(other.getTargetPath())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasFullpath()) { hash = (37 * hash) + FULLPATH_FIELD_NUMBER; hash = (53 * hash) + getFullpath().hashCode(); } if (hasModificationLabel()) { hash = (37 * hash) + MODIFICATIONLABEL_FIELD_NUMBER; hash = (53 * hash) + getModificationLabel().hashCode(); } if (hasTargetPath()) { hash = (37 * hash) + TARGETPATH_FIELD_NUMBER; hash = (53 * hash) + getTargetPath().hashCode(); } hash 
= (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(java.io.InputStream input) throws 
java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } 
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Snapshot diff report entry
     * 
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportEntryProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportEntryProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); modificationLabel_ = ""; bitField0_ = (bitField0_ & ~0x00000002); targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor 
getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.fullpath_ = fullpath_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.modificationLabel_ = modificationLabel_; if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.targetPath_ = targetPath_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { 
return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance()) return this; if (other.hasFullpath()) { setFullpath(other.getFullpath()); } if (other.hasModificationLabel()) { bitField0_ |= 0x00000002; modificationLabel_ = other.modificationLabel_; onChanged(); } if (other.hasTargetPath()) { setTargetPath(other.getTargetPath()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasFullpath()) { return false; } if (!hasModificationLabel()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException 
e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.ByteString fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes fullpath = 1; */ public boolean hasFullpath() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes fullpath = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath() { return fullpath_; } /** * required bytes fullpath = 1; */ public Builder setFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; fullpath_ = value; onChanged(); return this; } /** * required bytes fullpath = 1; */ public Builder clearFullpath() { bitField0_ = (bitField0_ & ~0x00000001); fullpath_ = getDefaultInstance().getFullpath(); onChanged(); return this; } private java.lang.Object modificationLabel_ = ""; /** * required string modificationLabel = 2; */ public boolean hasModificationLabel() { return ((bitField0_ & 0x00000002) != 0); } /** * required string modificationLabel = 2; */ public java.lang.String getModificationLabel() { java.lang.Object ref = modificationLabel_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { modificationLabel_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string modificationLabel = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getModificationLabelBytes() { java.lang.Object ref = modificationLabel_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = 
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); modificationLabel_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string modificationLabel = 2; */ public Builder setModificationLabel( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; modificationLabel_ = value; onChanged(); return this; } /** * required string modificationLabel = 2; */ public Builder clearModificationLabel() { bitField0_ = (bitField0_ & ~0x00000002); modificationLabel_ = getDefaultInstance().getModificationLabel(); onChanged(); return this; } /** * required string modificationLabel = 2; */ public Builder setModificationLabelBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; modificationLabel_ = value; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes targetPath = 3; */ public boolean hasTargetPath() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bytes targetPath = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath() { return targetPath_; } /** * optional bytes targetPath = 3; */ public Builder setTargetPath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; targetPath_ = value; onChanged(); return this; } /** * optional bytes targetPath = 3; */ public Builder clearTargetPath() { bitField0_ = (bitField0_ & ~0x00000004); targetPath_ = getDefaultInstance().getTargetPath(); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override 
public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportEntryProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportEntryProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotDiffReportEntryProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new SnapshotDiffReportEntryProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotDiffReportProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
     * full path of the directory where snapshots were taken
     * 
* * required string snapshotRoot = 1; */ boolean hasSnapshotRoot(); /** *
     * full path of the directory where snapshots were taken
     * 
* * required string snapshotRoot = 1; */ java.lang.String getSnapshotRoot(); /** *
     * full path of the directory where snapshots were taken
     * 
* * required string snapshotRoot = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotRootBytes(); /** * required string fromSnapshot = 2; */ boolean hasFromSnapshot(); /** * required string fromSnapshot = 2; */ java.lang.String getFromSnapshot(); /** * required string fromSnapshot = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getFromSnapshotBytes(); /** * required string toSnapshot = 3; */ boolean hasToSnapshot(); /** * required string toSnapshot = 3; */ java.lang.String getToSnapshot(); /** * required string toSnapshot = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getToSnapshotBytes(); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ java.util.List getDiffReportEntriesList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ int getDiffReportEntriesCount(); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ java.util.List getDiffReportEntriesOrBuilderList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder( int index); } /** *
   **
   * Snapshot diff report
   * 
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportProto} */ public static final class SnapshotDiffReportProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportProto) SnapshotDiffReportProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotDiffReportProto.newBuilder() to construct. private SnapshotDiffReportProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotDiffReportProto() { snapshotRoot_ = ""; fromSnapshot_ = ""; toSnapshot_ = ""; diffReportEntries_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshotDiffReportProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; snapshotRoot_ = bs; break; } case 18: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; fromSnapshot_ = bs; break; } case 26: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000004; toSnapshot_ = bs; break; } case 34: { if (!((mutable_bitField0_ & 0x00000008) != 0)) { diffReportEntries_ = new java.util.ArrayList(); 
mutable_bitField0_ |= 0x00000008; } diffReportEntries_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.PARSER, extensionRegistry)); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000008) != 0)) { diffReportEntries_ = java.util.Collections.unmodifiableList(diffReportEntries_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder.class); } private int bitField0_; public static final int SNAPSHOTROOT_FIELD_NUMBER = 1; private volatile java.lang.Object snapshotRoot_; /** *
     * full path of the directory where snapshots were taken
     * 
* * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) != 0); } /** *
     * full path of the directory where snapshots were taken
     * 
* * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } } /** *
     * full path of the directory where snapshots were taken
     * 
* * required string snapshotRoot = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int FROMSNAPSHOT_FIELD_NUMBER = 2; private volatile java.lang.Object fromSnapshot_; /** * required string fromSnapshot = 2; */ public boolean hasFromSnapshot() { return ((bitField0_ & 0x00000002) != 0); } /** * required string fromSnapshot = 2; */ public java.lang.String getFromSnapshot() { java.lang.Object ref = fromSnapshot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { fromSnapshot_ = s; } return s; } } /** * required string fromSnapshot = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getFromSnapshotBytes() { java.lang.Object ref = fromSnapshot_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromSnapshot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int TOSNAPSHOT_FIELD_NUMBER = 3; private volatile java.lang.Object toSnapshot_; /** * required string toSnapshot = 3; */ public boolean hasToSnapshot() { return ((bitField0_ & 0x00000004) != 0); } /** * required string toSnapshot = 3; */ public java.lang.String getToSnapshot() { java.lang.Object ref = toSnapshot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { toSnapshot_ = s; } return s; } } /** * required string toSnapshot = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getToSnapshotBytes() { java.lang.Object ref = toSnapshot_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); toSnapshot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int DIFFREPORTENTRIES_FIELD_NUMBER = 4; private java.util.List diffReportEntries_; /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public java.util.List getDiffReportEntriesList() { return diffReportEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public java.util.List getDiffReportEntriesOrBuilderList() { return diffReportEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public int getDiffReportEntriesCount() { return diffReportEntries_.size(); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index) { return diffReportEntries_.get(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder( int index) { return diffReportEntries_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSnapshotRoot()) { memoizedIsInitialized = 0; return false; } if 
(!hasFromSnapshot()) { memoizedIsInitialized = 0; return false; } if (!hasToSnapshot()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getDiffReportEntriesCount(); i++) { if (!getDiffReportEntries(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, snapshotRoot_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, fromSnapshot_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, toSnapshot_); } for (int i = 0; i < diffReportEntries_.size(); i++) { output.writeMessage(4, diffReportEntries_.get(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, snapshotRoot_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, fromSnapshot_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, toSnapshot_); } for (int i = 0; i < diffReportEntries_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, diffReportEntries_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto) obj; if (hasSnapshotRoot() != other.hasSnapshotRoot()) return false; if (hasSnapshotRoot()) { if (!getSnapshotRoot() .equals(other.getSnapshotRoot())) return false; } if (hasFromSnapshot() != other.hasFromSnapshot()) return false; if (hasFromSnapshot()) { if (!getFromSnapshot() .equals(other.getFromSnapshot())) return false; } if (hasToSnapshot() != other.hasToSnapshot()) return false; if (hasToSnapshot()) { if (!getToSnapshot() .equals(other.getToSnapshot())) return false; } if (!getDiffReportEntriesList() .equals(other.getDiffReportEntriesList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSnapshotRoot()) { hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshotRoot().hashCode(); } if (hasFromSnapshot()) { hash = (37 * hash) + FROMSNAPSHOT_FIELD_NUMBER; hash = (53 * hash) + getFromSnapshot().hashCode(); } if (hasToSnapshot()) { hash = (37 * hash) + TOSNAPSHOT_FIELD_NUMBER; hash = (53 * hash) + getToSnapshot().hashCode(); } if (getDiffReportEntriesCount() > 0) { hash = (37 * hash) + DIFFREPORTENTRIES_FIELD_NUMBER; hash = (53 * hash) + getDiffReportEntriesList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Snapshot diff report
     * 
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getDiffReportEntriesFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); snapshotRoot_ = ""; bitField0_ = (bitField0_ & ~0x00000001); fromSnapshot_ = ""; bitField0_ = (bitField0_ & ~0x00000002); toSnapshot_ = ""; bitField0_ = (bitField0_ & ~0x00000004); if (diffReportEntriesBuilder_ == null) { diffReportEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); } else { diffReportEntriesBuilder_.clear(); } return this; } @java.lang.Override public 
org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.snapshotRoot_ = snapshotRoot_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.fromSnapshot_ = fromSnapshot_; if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.toSnapshot_ = toSnapshot_; if (diffReportEntriesBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0)) { diffReportEntries_ = java.util.Collections.unmodifiableList(diffReportEntries_); bitField0_ = (bitField0_ & ~0x00000008); } result.diffReportEntries_ = diffReportEntries_; } else { result.diffReportEntries_ = diffReportEntriesBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor 
field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance()) return this; if (other.hasSnapshotRoot()) { bitField0_ |= 0x00000001; snapshotRoot_ = other.snapshotRoot_; onChanged(); } if (other.hasFromSnapshot()) { bitField0_ |= 0x00000002; fromSnapshot_ = other.fromSnapshot_; onChanged(); } if (other.hasToSnapshot()) { bitField0_ |= 0x00000004; toSnapshot_ = other.toSnapshot_; onChanged(); } if (diffReportEntriesBuilder_ == null) { if (!other.diffReportEntries_.isEmpty()) { if (diffReportEntries_.isEmpty()) { diffReportEntries_ = other.diffReportEntries_; bitField0_ = (bitField0_ & ~0x00000008); } else { ensureDiffReportEntriesIsMutable(); 
diffReportEntries_.addAll(other.diffReportEntries_); } onChanged(); } } else { if (!other.diffReportEntries_.isEmpty()) { if (diffReportEntriesBuilder_.isEmpty()) { diffReportEntriesBuilder_.dispose(); diffReportEntriesBuilder_ = null; diffReportEntries_ = other.diffReportEntries_; bitField0_ = (bitField0_ & ~0x00000008); diffReportEntriesBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getDiffReportEntriesFieldBuilder() : null; } else { diffReportEntriesBuilder_.addAllMessages(other.diffReportEntries_); } } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasSnapshotRoot()) { return false; } if (!hasFromSnapshot()) { return false; } if (!hasToSnapshot()) { return false; } for (int i = 0; i < getDiffReportEntriesCount(); i++) { if (!getDiffReportEntries(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object snapshotRoot_ = ""; /** *
       * full path of the directory where snapshots were taken
       * 
* * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) != 0); } /** *
       * full path of the directory where snapshots were taken
       * 
* * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } else { return (java.lang.String) ref; } } /** *
       * full path of the directory where snapshots were taken
       * 
* * required string snapshotRoot = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** *
       * full path of the directory where snapshots were taken
       * 
* * required string snapshotRoot = 1; */ public Builder setSnapshotRoot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } /** *
       * full path of the directory where snapshots were taken
       * 
* * required string snapshotRoot = 1; */ public Builder clearSnapshotRoot() { bitField0_ = (bitField0_ & ~0x00000001); snapshotRoot_ = getDefaultInstance().getSnapshotRoot(); onChanged(); return this; } /** *
       * full path of the directory where snapshots were taken
       * 
* * required string snapshotRoot = 1; */ public Builder setSnapshotRootBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } private java.lang.Object fromSnapshot_ = ""; /** * required string fromSnapshot = 2; */ public boolean hasFromSnapshot() { return ((bitField0_ & 0x00000002) != 0); } /** * required string fromSnapshot = 2; */ public java.lang.String getFromSnapshot() { java.lang.Object ref = fromSnapshot_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { fromSnapshot_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string fromSnapshot = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getFromSnapshotBytes() { java.lang.Object ref = fromSnapshot_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromSnapshot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string fromSnapshot = 2; */ public Builder setFromSnapshot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; fromSnapshot_ = value; onChanged(); return this; } /** * required string fromSnapshot = 2; */ public Builder clearFromSnapshot() { bitField0_ = (bitField0_ & ~0x00000002); fromSnapshot_ = getDefaultInstance().getFromSnapshot(); onChanged(); return this; } /** * required string fromSnapshot = 2; */ public Builder setFromSnapshotBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; fromSnapshot_ = value; onChanged(); return this; 
} private java.lang.Object toSnapshot_ = ""; /** * required string toSnapshot = 3; */ public boolean hasToSnapshot() { return ((bitField0_ & 0x00000004) != 0); } /** * required string toSnapshot = 3; */ public java.lang.String getToSnapshot() { java.lang.Object ref = toSnapshot_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { toSnapshot_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string toSnapshot = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getToSnapshotBytes() { java.lang.Object ref = toSnapshot_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); toSnapshot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string toSnapshot = 3; */ public Builder setToSnapshot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; toSnapshot_ = value; onChanged(); return this; } /** * required string toSnapshot = 3; */ public Builder clearToSnapshot() { bitField0_ = (bitField0_ & ~0x00000004); toSnapshot_ = getDefaultInstance().getToSnapshot(); onChanged(); return this; } /** * required string toSnapshot = 3; */ public Builder setToSnapshotBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; toSnapshot_ = value; onChanged(); return this; } private java.util.List diffReportEntries_ = java.util.Collections.emptyList(); private void ensureDiffReportEntriesIsMutable() { if (!((bitField0_ & 0x00000008) != 0)) { diffReportEntries_ = new java.util.ArrayList(diffReportEntries_); bitField0_ |= 0x00000008; } } private 
org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder> diffReportEntriesBuilder_; /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public java.util.List getDiffReportEntriesList() { if (diffReportEntriesBuilder_ == null) { return java.util.Collections.unmodifiableList(diffReportEntries_); } else { return diffReportEntriesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public int getDiffReportEntriesCount() { if (diffReportEntriesBuilder_ == null) { return diffReportEntries_.size(); } else { return diffReportEntriesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index) { if (diffReportEntriesBuilder_ == null) { return diffReportEntries_.get(index); } else { return diffReportEntriesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder setDiffReportEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) { if (diffReportEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDiffReportEntriesIsMutable(); diffReportEntries_.set(index, value); onChanged(); } else { diffReportEntriesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder setDiffReportEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) { if (diffReportEntriesBuilder_ 
== null) { ensureDiffReportEntriesIsMutable(); diffReportEntries_.set(index, builderForValue.build()); onChanged(); } else { diffReportEntriesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addDiffReportEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) { if (diffReportEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDiffReportEntriesIsMutable(); diffReportEntries_.add(value); onChanged(); } else { diffReportEntriesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addDiffReportEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) { if (diffReportEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDiffReportEntriesIsMutable(); diffReportEntries_.add(index, value); onChanged(); } else { diffReportEntriesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addDiffReportEntries( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) { if (diffReportEntriesBuilder_ == null) { ensureDiffReportEntriesIsMutable(); diffReportEntries_.add(builderForValue.build()); onChanged(); } else { diffReportEntriesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addDiffReportEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) { if (diffReportEntriesBuilder_ == null) { ensureDiffReportEntriesIsMutable(); diffReportEntries_.add(index, builderForValue.build()); onChanged(); } else { 
diffReportEntriesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addAllDiffReportEntries( java.lang.Iterable values) { if (diffReportEntriesBuilder_ == null) { ensureDiffReportEntriesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, diffReportEntries_); onChanged(); } else { diffReportEntriesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder clearDiffReportEntries() { if (diffReportEntriesBuilder_ == null) { diffReportEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); onChanged(); } else { diffReportEntriesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder removeDiffReportEntries(int index) { if (diffReportEntriesBuilder_ == null) { ensureDiffReportEntriesIsMutable(); diffReportEntries_.remove(index); onChanged(); } else { diffReportEntriesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder getDiffReportEntriesBuilder( int index) { return getDiffReportEntriesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder( int index) { if (diffReportEntriesBuilder_ == null) { return diffReportEntries_.get(index); } else { return diffReportEntriesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public java.util.List getDiffReportEntriesOrBuilderList() { if (diffReportEntriesBuilder_ 
!= null) { return diffReportEntriesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(diffReportEntries_); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder addDiffReportEntriesBuilder() { return getDiffReportEntriesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder addDiffReportEntriesBuilder( int index) { return getDiffReportEntriesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public java.util.List getDiffReportEntriesBuilderList() { return getDiffReportEntriesFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder> getDiffReportEntriesFieldBuilder() { if (diffReportEntriesBuilder_ == null) { diffReportEntriesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder>( diffReportEntries_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); diffReportEntries_ = null; } return 
diffReportEntriesBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotDiffReportProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new SnapshotDiffReportProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotDiffReportListingEntryProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportListingEntryProto) 
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required bytes fullpath = 1; */ boolean hasFullpath(); /** * required bytes fullpath = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath(); /** * required uint64 dirId = 2; */ boolean hasDirId(); /** * required uint64 dirId = 2; */ long getDirId(); /** * required bool isReference = 3; */ boolean hasIsReference(); /** * required bool isReference = 3; */ boolean getIsReference(); /** * optional bytes targetPath = 4; */ boolean hasTargetPath(); /** * optional bytes targetPath = 4; */ org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath(); /** * optional uint64 fileId = 5; */ boolean hasFileId(); /** * optional uint64 fileId = 5; */ long getFileId(); } /** *
   **
   * Snapshot diff report listing entry
   * 
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportListingEntryProto} */ public static final class SnapshotDiffReportListingEntryProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportListingEntryProto) SnapshotDiffReportListingEntryProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotDiffReportListingEntryProto.newBuilder() to construct. private SnapshotDiffReportListingEntryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotDiffReportListingEntryProto() { fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshotDiffReportListingEntryProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { bitField0_ |= 0x00000001; fullpath_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; dirId_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; isReference_ = input.readBool(); break; } case 34: { bitField0_ |= 0x00000008; targetPath_ = input.readBytes(); break; } case 40: { bitField0_ |= 0x00000010; fileId_ = input.readUInt64(); break; } default: { if (!parseUnknownField( 
input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder.class); } private int bitField0_; public static final int FULLPATH_FIELD_NUMBER = 1; private org.apache.hadoop.thirdparty.protobuf.ByteString fullpath_; /** * required bytes fullpath = 1; */ public boolean hasFullpath() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes fullpath = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath() { return fullpath_; } public static final int DIRID_FIELD_NUMBER = 2; private long dirId_; /** * required uint64 dirId = 2; */ public boolean hasDirId() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 dirId = 2; */ public long getDirId() { return dirId_; } public static final int ISREFERENCE_FIELD_NUMBER = 3; private boolean isReference_; /** * required bool isReference = 3; */ public boolean hasIsReference() { return ((bitField0_ & 0x00000004) 
// NOTE(review): compiler-generated protobuf code (file header says DO NOT EDIT); comments added for navigation only.
!= 0); } /** * required bool isReference = 3; */ public boolean getIsReference() { return isReference_; } public static final int TARGETPATH_FIELD_NUMBER = 4; private org.apache.hadoop.thirdparty.protobuf.ByteString targetPath_; /** * optional bytes targetPath = 4; */ public boolean hasTargetPath() { return ((bitField0_ & 0x00000008) != 0); } /** * optional bytes targetPath = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath() { return targetPath_; } public static final int FILEID_FIELD_NUMBER = 5; private long fileId_; /** * optional uint64 fileId = 5; */ public boolean hasFileId() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 fileId = 5; */ public long getFileId() { return fileId_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasFullpath()) { memoizedIsInitialized = 0; return false; } if (!hasDirId()) { memoizedIsInitialized = 0; return false; } if (!hasIsReference()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeBytes(1, fullpath_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, dirId_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBool(3, isReference_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBytes(4, targetPath_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, fileId_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(1, fullpath_); } if 
// getSerializedSize() continues: sums the encoded sizes of fields 2-5 plus unknown fields, then memoizes the total.
(((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, dirId_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(3, isReference_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(4, targetPath_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, fileId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto) obj; if (hasFullpath() != other.hasFullpath()) return false; if (hasFullpath()) { if (!getFullpath() .equals(other.getFullpath())) return false; } if (hasDirId() != other.hasDirId()) return false; if (hasDirId()) { if (getDirId() != other.getDirId()) return false; } if (hasIsReference() != other.hasIsReference()) return false; if (hasIsReference()) { if (getIsReference() != other.getIsReference()) return false; } if (hasTargetPath() != other.hasTargetPath()) return false; if (hasTargetPath()) { if (!getTargetPath() .equals(other.getTargetPath())) return false; } if (hasFileId() != other.hasFileId()) return false; if (hasFileId()) { if (getFileId() != other.getFileId()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if 
// hashCode() folds each present field into the memoized hash; it is consistent with equals() above (same five fields plus unknownFields).
(hasFullpath()) { hash = (37 * hash) + FULLPATH_FIELD_NUMBER; hash = (53 * hash) + getFullpath().hashCode(); } if (hasDirId()) { hash = (37 * hash) + DIRID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getDirId()); } if (hasIsReference()) { hash = (37 * hash) + ISREFERENCE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getIsReference()); } if (hasTargetPath()) { hash = (37 * hash) + TARGETPATH_FIELD_NUMBER; hash = (53 * hash) + getTargetPath().hashCode(); } if (hasFileId()) { hash = (37 * hash) + FILEID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFileId()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return 
// Static parseFrom overloads below delegate to PARSER / GeneratedMessageV3 parse helpers for each input type.
PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /**
     **
     * Snapshot diff report listing entry
     * 
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportListingEntryProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportListingEntryProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); dirId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); isReference_ = false; bitField0_ = (bitField0_ & ~0x00000004); targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000008); fileId_ = 
// Builder.clear() resets all five fields to their defaults and clears the corresponding presence bits.
0L; bitField0_ = (bitField0_ & ~0x00000010); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.fullpath_ = fullpath_; if (((from_bitField0_ & 0x00000002) != 0)) { result.dirId_ = dirId_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.isReference_ = isReference_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { to_bitField0_ |= 0x00000008; } result.targetPath_ = targetPath_; if (((from_bitField0_ & 0x00000010) != 0)) { result.fileId_ = fileId_; to_bitField0_ |= 0x00000010; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
// Delegating overrides of GeneratedMessageV3.Builder (setField/clearField/clearOneof/setRepeatedField/addRepeatedField/mergeFrom) follow.
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()) return this; if (other.hasFullpath()) { setFullpath(other.getFullpath()); } if (other.hasDirId()) { setDirId(other.getDirId()); } if (other.hasIsReference()) { setIsReference(other.getIsReference()); } if (other.hasTargetPath()) { setTargetPath(other.getTargetPath()); } if (other.hasFileId()) { setFileId(other.getFileId()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasFullpath()) { return false; } if (!hasDirId()) { return 
// Builder.isInitialized(): the three required fields (fullpath, dirId, isReference) must all be set.
false; } if (!hasIsReference()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.ByteString fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes fullpath = 1; */ public boolean hasFullpath() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes fullpath = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath() { return fullpath_; } /** * required bytes fullpath = 1; */ public Builder setFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; fullpath_ = value; onChanged(); return this; } /** * required bytes fullpath = 1; */ public Builder clearFullpath() { bitField0_ = (bitField0_ & ~0x00000001); fullpath_ = getDefaultInstance().getFullpath(); onChanged(); return this; } private long dirId_ ; /** * required uint64 dirId = 2; */ public boolean hasDirId() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 dirId = 2; */ public long getDirId() { return dirId_; } /** * required uint64 dirId = 2; */ public Builder setDirId(long value) { bitField0_ |= 0x00000002; dirId_ = value; onChanged(); return this; } /** * required uint64 dirId 
= 2; */ public Builder clearDirId() { bitField0_ = (bitField0_ & ~0x00000002); dirId_ = 0L; onChanged(); return this; } private boolean isReference_ ; /** * required bool isReference = 3; */ public boolean hasIsReference() { return ((bitField0_ & 0x00000004) != 0); } /** * required bool isReference = 3; */ public boolean getIsReference() { return isReference_; } /** * required bool isReference = 3; */ public Builder setIsReference(boolean value) { bitField0_ |= 0x00000004; isReference_ = value; onChanged(); return this; } /** * required bool isReference = 3; */ public Builder clearIsReference() { bitField0_ = (bitField0_ & ~0x00000004); isReference_ = false; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes targetPath = 4; */ public boolean hasTargetPath() { return ((bitField0_ & 0x00000008) != 0); } /** * optional bytes targetPath = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath() { return targetPath_; } /** * optional bytes targetPath = 4; */ public Builder setTargetPath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; targetPath_ = value; onChanged(); return this; } /** * optional bytes targetPath = 4; */ public Builder clearTargetPath() { bitField0_ = (bitField0_ & ~0x00000008); targetPath_ = getDefaultInstance().getTargetPath(); onChanged(); return this; } private long fileId_ ; /** * optional uint64 fileId = 5; */ public boolean hasFileId() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 fileId = 5; */ public long getFileId() { return fileId_; } /** * optional uint64 fileId = 5; */ public Builder setFileId(long value) { bitField0_ |= 0x00000010; fileId_ = value; onChanged(); return this; } /** * optional uint64 fileId = 5; */ public Builder clearFileId() { bitField0_ = (bitField0_ & ~0x00000010); 
// clearFileId() resets fileId to its default (0) and clears its presence bit.
fileId_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportListingEntryProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportListingEntryProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotDiffReportListingEntryProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new SnapshotDiffReportListingEntryProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotDiffReportCursorProtoOrBuilder extends // 
@@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportCursorProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required bytes startPath = 1; */ boolean hasStartPath(); /** * required bytes startPath = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getStartPath(); /** * required int32 index = 2 [default = -1]; */ boolean hasIndex(); /** * required int32 index = 2 [default = -1]; */ int getIndex(); } /** * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportCursorProto} */ public static final class SnapshotDiffReportCursorProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportCursorProto) SnapshotDiffReportCursorProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotDiffReportCursorProto.newBuilder() to construct. private SnapshotDiffReportCursorProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotDiffReportCursorProto() { startPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; index_ = -1; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshotDiffReportCursorProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { bitField0_ |= 0x00000001; startPath_ = input.readBytes(); 
// (parsing constructor continues) tag 16 reads the int32 index field; unrecognized tags are preserved in unknownFields.
break; } case 16: { bitField0_ |= 0x00000002; index_ = input.readInt32(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder.class); } private int bitField0_; public static final int STARTPATH_FIELD_NUMBER = 1; private org.apache.hadoop.thirdparty.protobuf.ByteString startPath_; /** * required bytes startPath = 1; */ public boolean hasStartPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes startPath = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStartPath() { return startPath_; } public static final int INDEX_FIELD_NUMBER = 2; private int index_; /** * required int32 index = 2 [default = -1]; */ public boolean hasIndex() { return ((bitField0_ & 0x00000002) != 0); } /** * required int32 index = 2 [default = -1]; */ public int getIndex() { return index_; } private byte memoizedIsInitialized = -1; @java.lang.Override 
// isInitialized(): both required fields (startPath, index) must be present; result is memoized.
public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasStartPath()) { memoizedIsInitialized = 0; return false; } if (!hasIndex()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeBytes(1, startPath_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeInt32(2, index_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(1, startPath_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt32Size(2, index_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto) obj; if (hasStartPath() != other.hasStartPath()) return false; if (hasStartPath()) { if (!getStartPath() .equals(other.getStartPath())) return false; } if (hasIndex() != other.hasIndex()) return false; if (hasIndex()) { if (getIndex() != other.getIndex()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * 
// hashCode() mixes startPath and index (when present) plus unknownFields; consistent with equals() above.
hash) + getDescriptor().hashCode(); if (hasStartPath()) { hash = (37 * hash) + STARTPATH_FIELD_NUMBER; hash = (53 * hash) + getStartPath().hashCode(); } if (hasIndex()) { hash = (37 * hash) + INDEX_FIELD_NUMBER; hash = (53 * hash) + getIndex(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
// CodedInputStream parseFrom overloads and the newBuilder/toBuilder factories follow.
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportCursorProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportCursorProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.newBuilder() private 
// Builder: construct via SnapshotDiffReportCursorProto.newBuilder() as noted above.
Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); startPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); index_ = -1; bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.startPath_ = startPath_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.index_ = index_; result.bitField0_ = to_bitField0_; onBuilt(); 
// buildPartial() above copies startPath/index and their presence bits into the result message.
return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance()) return this; if (other.hasStartPath()) { setStartPath(other.getStartPath()); } if (other.hasIndex()) { setIndex(other.getIndex()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasStartPath()) { return false; } if (!hasIndex()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( 
// mergeFrom(CodedInputStream): parses via PARSER; on failure, merges the partially-parsed message before rethrowing.
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.ByteString startPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * required bytes startPath = 1; */ public boolean hasStartPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required bytes startPath = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStartPath() { return startPath_; } /** * required bytes startPath = 1; */ public Builder setStartPath(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; startPath_ = value; onChanged(); return this; } /** * required bytes startPath = 1; */ public Builder clearStartPath() { bitField0_ = (bitField0_ & ~0x00000001); startPath_ = getDefaultInstance().getStartPath(); onChanged(); return this; } private int index_ = -1; /** * required int32 index = 2 [default = -1]; */ public boolean hasIndex() { return ((bitField0_ & 0x00000002) != 0); } /** * required int32 index = 2 [default = -1]; */ public int getIndex() { return index_; } /** * required int32 index = 2 [default = -1]; */ public Builder setIndex(int value) { bitField0_ |= 0x00000002; index_ = value; onChanged(); return this; } /** * required int32 index = 2 [default = -1]; */ public Builder clearIndex() { bitField0_ = 
(bitField0_ & ~0x00000002); index_ = -1; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportCursorProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportCursorProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotDiffReportCursorProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new SnapshotDiffReportCursorProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotDiffReportListingProtoOrBuilder extends // 
@@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportListingProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
     * full path of the directory where snapshots were taken
     * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ java.util.List getModifiedEntriesList(); /** *
     * full path of the directory where snapshots were taken
     * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getModifiedEntries(int index); /** *
     * full path of the directory where snapshots were taken
     * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ int getModifiedEntriesCount(); /** *
     * full path of the directory where snapshots were taken
     * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ java.util.List getModifiedEntriesOrBuilderList(); /** *
     * full path of the directory where snapshots were taken
     * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getModifiedEntriesOrBuilder( int index); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ java.util.List getCreatedEntriesList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getCreatedEntries(int index); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ int getCreatedEntriesCount(); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ java.util.List getCreatedEntriesOrBuilderList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getCreatedEntriesOrBuilder( int index); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ java.util.List getDeletedEntriesList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDeletedEntries(int index); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ int getDeletedEntriesCount(); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ java.util.List getDeletedEntriesOrBuilderList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getDeletedEntriesOrBuilder( int index); /** * required bool isFromEarlier = 4; */ boolean hasIsFromEarlier(); /** * required bool isFromEarlier = 4; */ boolean getIsFromEarlier(); /** * optional 
.hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ boolean hasCursor(); /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getCursor(); /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder getCursorOrBuilder(); } /** *
   **
   * Snapshot diff report listing
   * 
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportListingProto} */ public static final class SnapshotDiffReportListingProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportListingProto) SnapshotDiffReportListingProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotDiffReportListingProto.newBuilder() to construct. private SnapshotDiffReportListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotDiffReportListingProto() { modifiedEntries_ = java.util.Collections.emptyList(); createdEntries_ = java.util.Collections.emptyList(); deletedEntries_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshotDiffReportListingProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { modifiedEntries_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } modifiedEntries_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.PARSER, extensionRegistry)); break; } case 18: { if (!((mutable_bitField0_ & 0x00000002) != 0)) { createdEntries_ = new java.util.ArrayList(); mutable_bitField0_ |= 
0x00000002; } createdEntries_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.PARSER, extensionRegistry)); break; } case 26: { if (!((mutable_bitField0_ & 0x00000004) != 0)) { deletedEntries_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000004; } deletedEntries_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.PARSER, extensionRegistry)); break; } case 32: { bitField0_ |= 0x00000001; isFromEarlier_ = input.readBool(); break; } case 42: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) != 0)) { subBuilder = cursor_.toBuilder(); } cursor_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(cursor_); cursor_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { modifiedEntries_ = java.util.Collections.unmodifiableList(modifiedEntries_); } if (((mutable_bitField0_ & 0x00000002) != 0)) { createdEntries_ = java.util.Collections.unmodifiableList(createdEntries_); } if (((mutable_bitField0_ & 0x00000004) != 0)) { deletedEntries_ = java.util.Collections.unmodifiableList(deletedEntries_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.Builder.class); } private int bitField0_; public static final int MODIFIEDENTRIES_FIELD_NUMBER = 1; private java.util.List modifiedEntries_; /** *
     * full path of the directory where snapshots were taken
     * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public java.util.List getModifiedEntriesList() { return modifiedEntries_; } /** *
     * full path of the directory where snapshots were taken
     * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public java.util.List getModifiedEntriesOrBuilderList() { return modifiedEntries_; } /** *
     * full path of the directory where snapshots were taken
     * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public int getModifiedEntriesCount() { return modifiedEntries_.size(); } /** *
     * full path of the directory where snapshots were taken
     * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getModifiedEntries(int index) { return modifiedEntries_.get(index); } /** *
     * full path of the directory where snapshots were taken
     * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getModifiedEntriesOrBuilder( int index) { return modifiedEntries_.get(index); } public static final int CREATEDENTRIES_FIELD_NUMBER = 2; private java.util.List createdEntries_; /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public java.util.List getCreatedEntriesList() { return createdEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public java.util.List getCreatedEntriesOrBuilderList() { return createdEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public int getCreatedEntriesCount() { return createdEntries_.size(); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getCreatedEntries(int index) { return createdEntries_.get(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getCreatedEntriesOrBuilder( int index) { return createdEntries_.get(index); } public static final int DELETEDENTRIES_FIELD_NUMBER = 3; private java.util.List deletedEntries_; /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public java.util.List getDeletedEntriesList() { return deletedEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public java.util.List getDeletedEntriesOrBuilderList() { return deletedEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public int getDeletedEntriesCount() { return deletedEntries_.size(); } /** * repeated 
.hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDeletedEntries(int index) { return deletedEntries_.get(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getDeletedEntriesOrBuilder( int index) { return deletedEntries_.get(index); } public static final int ISFROMEARLIER_FIELD_NUMBER = 4; private boolean isFromEarlier_; /** * required bool isFromEarlier = 4; */ public boolean hasIsFromEarlier() { return ((bitField0_ & 0x00000001) != 0); } /** * required bool isFromEarlier = 4; */ public boolean getIsFromEarlier() { return isFromEarlier_; } public static final int CURSOR_FIELD_NUMBER = 5; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto cursor_; /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public boolean hasCursor() { return ((bitField0_ & 0x00000002) != 0); } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getCursor() { return cursor_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance() : cursor_; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder getCursorOrBuilder() { return cursor_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance() : cursor_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasIsFromEarlier()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getModifiedEntriesCount(); i++) { if (!getModifiedEntries(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } for (int i = 0; i < getCreatedEntriesCount(); i++) { if (!getCreatedEntries(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } for (int i = 0; i < getDeletedEntriesCount(); i++) { if (!getDeletedEntries(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasCursor()) { if (!getCursor().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < modifiedEntries_.size(); i++) { output.writeMessage(1, modifiedEntries_.get(i)); } for (int i = 0; i < createdEntries_.size(); i++) { output.writeMessage(2, createdEntries_.get(i)); } for (int i = 0; i < deletedEntries_.size(); i++) { output.writeMessage(3, deletedEntries_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { output.writeBool(4, isFromEarlier_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(5, getCursor()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < modifiedEntries_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, modifiedEntries_.get(i)); } for (int i = 0; i < createdEntries_.size(); i++) { size += 
org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(2, createdEntries_.get(i)); } for (int i = 0; i < deletedEntries_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(3, deletedEntries_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(4, isFromEarlier_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(5, getCursor()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto) obj; if (!getModifiedEntriesList() .equals(other.getModifiedEntriesList())) return false; if (!getCreatedEntriesList() .equals(other.getCreatedEntriesList())) return false; if (!getDeletedEntriesList() .equals(other.getDeletedEntriesList())) return false; if (hasIsFromEarlier() != other.hasIsFromEarlier()) return false; if (hasIsFromEarlier()) { if (getIsFromEarlier() != other.getIsFromEarlier()) return false; } if (hasCursor() != other.hasCursor()) return false; if (hasCursor()) { if (!getCursor() .equals(other.getCursor())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getModifiedEntriesCount() > 0) { hash = (37 * hash) + MODIFIEDENTRIES_FIELD_NUMBER; hash = (53 * hash) + getModifiedEntriesList().hashCode(); } if 
(getCreatedEntriesCount() > 0) { hash = (37 * hash) + CREATEDENTRIES_FIELD_NUMBER; hash = (53 * hash) + getCreatedEntriesList().hashCode(); } if (getDeletedEntriesCount() > 0) { hash = (37 * hash) + DELETEDENTRIES_FIELD_NUMBER; hash = (53 * hash) + getDeletedEntriesList().hashCode(); } if (hasIsFromEarlier()) { hash = (37 * hash) + ISFROMEARLIER_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getIsFromEarlier()); } if (hasCursor()) { hash = (37 * hash) + CURSOR_FIELD_NUMBER; hash = (53 * hash) + getCursor().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(byte[] data) 
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
.parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Snapshot diff report listing
     * 
* * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportListingProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportListingProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getModifiedEntriesFieldBuilder(); getCreatedEntriesFieldBuilder(); getDeletedEntriesFieldBuilder(); getCursorFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (modifiedEntriesBuilder_ == null) { modifiedEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { modifiedEntriesBuilder_.clear(); } if (createdEntriesBuilder_ == null) { createdEntries_ = 
java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); } else { createdEntriesBuilder_.clear(); } if (deletedEntriesBuilder_ == null) { deletedEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); } else { deletedEntriesBuilder_.clear(); } isFromEarlier_ = false; bitField0_ = (bitField0_ & ~0x00000008); if (cursorBuilder_ == null) { cursor_ = null; } else { cursorBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (modifiedEntriesBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { modifiedEntries_ = java.util.Collections.unmodifiableList(modifiedEntries_); bitField0_ = (bitField0_ & ~0x00000001); } result.modifiedEntries_ = modifiedEntries_; } else { result.modifiedEntries_ = modifiedEntriesBuilder_.build(); } if 
(createdEntriesBuilder_ == null) { if (((bitField0_ & 0x00000002) != 0)) { createdEntries_ = java.util.Collections.unmodifiableList(createdEntries_); bitField0_ = (bitField0_ & ~0x00000002); } result.createdEntries_ = createdEntries_; } else { result.createdEntries_ = createdEntriesBuilder_.build(); } if (deletedEntriesBuilder_ == null) { if (((bitField0_ & 0x00000004) != 0)) { deletedEntries_ = java.util.Collections.unmodifiableList(deletedEntries_); bitField0_ = (bitField0_ & ~0x00000004); } result.deletedEntries_ = deletedEntries_; } else { result.deletedEntries_ = deletedEntriesBuilder_.build(); } if (((from_bitField0_ & 0x00000008) != 0)) { result.isFromEarlier_ = isFromEarlier_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000010) != 0)) { if (cursorBuilder_ == null) { result.cursor_ = cursor_; } else { result.cursor_ = cursorBuilder_.build(); } to_bitField0_ |= 0x00000002; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder 
mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.getDefaultInstance()) return this; if (modifiedEntriesBuilder_ == null) { if (!other.modifiedEntries_.isEmpty()) { if (modifiedEntries_.isEmpty()) { modifiedEntries_ = other.modifiedEntries_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureModifiedEntriesIsMutable(); modifiedEntries_.addAll(other.modifiedEntries_); } onChanged(); } } else { if (!other.modifiedEntries_.isEmpty()) { if (modifiedEntriesBuilder_.isEmpty()) { modifiedEntriesBuilder_.dispose(); modifiedEntriesBuilder_ = null; modifiedEntries_ = other.modifiedEntries_; bitField0_ = (bitField0_ & ~0x00000001); modifiedEntriesBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getModifiedEntriesFieldBuilder() : null; } else { modifiedEntriesBuilder_.addAllMessages(other.modifiedEntries_); } } } if (createdEntriesBuilder_ == null) { if (!other.createdEntries_.isEmpty()) { if (createdEntries_.isEmpty()) { createdEntries_ = other.createdEntries_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureCreatedEntriesIsMutable(); createdEntries_.addAll(other.createdEntries_); } onChanged(); } } else { if (!other.createdEntries_.isEmpty()) { if (createdEntriesBuilder_.isEmpty()) { createdEntriesBuilder_.dispose(); createdEntriesBuilder_ = null; createdEntries_ = other.createdEntries_; bitField0_ = (bitField0_ & ~0x00000002); createdEntriesBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getCreatedEntriesFieldBuilder() : null; } else { createdEntriesBuilder_.addAllMessages(other.createdEntries_); } } } if (deletedEntriesBuilder_ == null) { if (!other.deletedEntries_.isEmpty()) { if (deletedEntries_.isEmpty()) { deletedEntries_ = other.deletedEntries_; bitField0_ = (bitField0_ & ~0x00000004); } else { ensureDeletedEntriesIsMutable(); deletedEntries_.addAll(other.deletedEntries_); } onChanged(); } } else { if (!other.deletedEntries_.isEmpty()) { if (deletedEntriesBuilder_.isEmpty()) { deletedEntriesBuilder_.dispose(); deletedEntriesBuilder_ = null; deletedEntries_ = other.deletedEntries_; bitField0_ = (bitField0_ & ~0x00000004); deletedEntriesBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getDeletedEntriesFieldBuilder() : null; } else { deletedEntriesBuilder_.addAllMessages(other.deletedEntries_); } } } if (other.hasIsFromEarlier()) { setIsFromEarlier(other.getIsFromEarlier()); } if (other.hasCursor()) { mergeCursor(other.getCursor()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasIsFromEarlier()) { return false; } for (int i = 0; i < getModifiedEntriesCount(); i++) { if (!getModifiedEntries(i).isInitialized()) { return false; } } for (int i = 0; i < getCreatedEntriesCount(); i++) { if (!getCreatedEntries(i).isInitialized()) { return false; } } for (int i = 0; i < getDeletedEntriesCount(); i++) { if (!getDeletedEntries(i).isInitialized()) { return false; } } if (hasCursor()) { if (!getCursor().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, 
extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.util.List modifiedEntries_ = java.util.Collections.emptyList(); private void ensureModifiedEntriesIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { modifiedEntries_ = new java.util.ArrayList(modifiedEntries_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> modifiedEntriesBuilder_; /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public java.util.List getModifiedEntriesList() { if (modifiedEntriesBuilder_ == null) { return java.util.Collections.unmodifiableList(modifiedEntries_); } else { return modifiedEntriesBuilder_.getMessageList(); } } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public int getModifiedEntriesCount() { if (modifiedEntriesBuilder_ == null) { return modifiedEntries_.size(); } else { return modifiedEntriesBuilder_.getCount(); } } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getModifiedEntries(int index) { if (modifiedEntriesBuilder_ == null) { return modifiedEntries_.get(index); } else { return modifiedEntriesBuilder_.getMessage(index); } } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder setModifiedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (modifiedEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureModifiedEntriesIsMutable(); modifiedEntries_.set(index, value); onChanged(); } else { modifiedEntriesBuilder_.setMessage(index, value); } return this; } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder setModifiedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (modifiedEntriesBuilder_ == null) { ensureModifiedEntriesIsMutable(); modifiedEntries_.set(index, builderForValue.build()); onChanged(); } else { modifiedEntriesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder addModifiedEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (modifiedEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureModifiedEntriesIsMutable(); modifiedEntries_.add(value); onChanged(); } else { modifiedEntriesBuilder_.addMessage(value); } return this; } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder addModifiedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (modifiedEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureModifiedEntriesIsMutable(); modifiedEntries_.add(index, value); onChanged(); } else { modifiedEntriesBuilder_.addMessage(index, value); } return this; } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder addModifiedEntries( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (modifiedEntriesBuilder_ == null) { ensureModifiedEntriesIsMutable(); modifiedEntries_.add(builderForValue.build()); onChanged(); } else { modifiedEntriesBuilder_.addMessage(builderForValue.build()); } return this; } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder addModifiedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (modifiedEntriesBuilder_ == null) { ensureModifiedEntriesIsMutable(); modifiedEntries_.add(index, builderForValue.build()); onChanged(); } else { modifiedEntriesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder addAllModifiedEntries( java.lang.Iterable values) { if (modifiedEntriesBuilder_ == null) { ensureModifiedEntriesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, modifiedEntries_); onChanged(); } else { modifiedEntriesBuilder_.addAllMessages(values); } return this; } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder clearModifiedEntries() { if (modifiedEntriesBuilder_ == null) { modifiedEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { modifiedEntriesBuilder_.clear(); } return this; } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public Builder removeModifiedEntries(int index) { if (modifiedEntriesBuilder_ == null) { ensureModifiedEntriesIsMutable(); modifiedEntries_.remove(index); onChanged(); } else { modifiedEntriesBuilder_.remove(index); } return this; } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder getModifiedEntriesBuilder( int index) { return getModifiedEntriesFieldBuilder().getBuilder(index); } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getModifiedEntriesOrBuilder( int index) { if (modifiedEntriesBuilder_ == null) { return modifiedEntries_.get(index); } else { return modifiedEntriesBuilder_.getMessageOrBuilder(index); } } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public java.util.List getModifiedEntriesOrBuilderList() { if (modifiedEntriesBuilder_ != null) { return modifiedEntriesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(modifiedEntries_); } } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addModifiedEntriesBuilder() { return getModifiedEntriesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()); } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addModifiedEntriesBuilder( int index) { return getModifiedEntriesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()); } /** *
       * full path of the directory where snapshots were taken
       * 
* * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1; */ public java.util.List getModifiedEntriesBuilderList() { return getModifiedEntriesFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> getModifiedEntriesFieldBuilder() { if (modifiedEntriesBuilder_ == null) { modifiedEntriesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder>( modifiedEntries_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); modifiedEntries_ = null; } return modifiedEntriesBuilder_; } private java.util.List createdEntries_ = java.util.Collections.emptyList(); private void ensureCreatedEntriesIsMutable() { if (!((bitField0_ & 0x00000002) != 0)) { createdEntries_ = new java.util.ArrayList(createdEntries_); bitField0_ |= 0x00000002; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> createdEntriesBuilder_; /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public java.util.List getCreatedEntriesList() { if (createdEntriesBuilder_ == null) { return java.util.Collections.unmodifiableList(createdEntries_); } 
else { return createdEntriesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public int getCreatedEntriesCount() { if (createdEntriesBuilder_ == null) { return createdEntries_.size(); } else { return createdEntriesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getCreatedEntries(int index) { if (createdEntriesBuilder_ == null) { return createdEntries_.get(index); } else { return createdEntriesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder setCreatedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (createdEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureCreatedEntriesIsMutable(); createdEntries_.set(index, value); onChanged(); } else { createdEntriesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder setCreatedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (createdEntriesBuilder_ == null) { ensureCreatedEntriesIsMutable(); createdEntries_.set(index, builderForValue.build()); onChanged(); } else { createdEntriesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder addCreatedEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (createdEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureCreatedEntriesIsMutable(); createdEntries_.add(value); onChanged(); } else { 
createdEntriesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder addCreatedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (createdEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureCreatedEntriesIsMutable(); createdEntries_.add(index, value); onChanged(); } else { createdEntriesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder addCreatedEntries( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (createdEntriesBuilder_ == null) { ensureCreatedEntriesIsMutable(); createdEntries_.add(builderForValue.build()); onChanged(); } else { createdEntriesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder addCreatedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (createdEntriesBuilder_ == null) { ensureCreatedEntriesIsMutable(); createdEntries_.add(index, builderForValue.build()); onChanged(); } else { createdEntriesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder addAllCreatedEntries( java.lang.Iterable values) { if (createdEntriesBuilder_ == null) { ensureCreatedEntriesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, createdEntries_); onChanged(); } else { createdEntriesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder clearCreatedEntries() { if 
(createdEntriesBuilder_ == null) { createdEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); } else { createdEntriesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public Builder removeCreatedEntries(int index) { if (createdEntriesBuilder_ == null) { ensureCreatedEntriesIsMutable(); createdEntries_.remove(index); onChanged(); } else { createdEntriesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder getCreatedEntriesBuilder( int index) { return getCreatedEntriesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getCreatedEntriesOrBuilder( int index) { if (createdEntriesBuilder_ == null) { return createdEntries_.get(index); } else { return createdEntriesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public java.util.List getCreatedEntriesOrBuilderList() { if (createdEntriesBuilder_ != null) { return createdEntriesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(createdEntries_); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addCreatedEntriesBuilder() { return getCreatedEntriesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addCreatedEntriesBuilder( int index) { return getCreatedEntriesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2; */ public java.util.List getCreatedEntriesBuilderList() { return getCreatedEntriesFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> getCreatedEntriesFieldBuilder() { if (createdEntriesBuilder_ == null) { createdEntriesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder>( createdEntries_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); createdEntries_ = null; } return createdEntriesBuilder_; } private java.util.List deletedEntries_ = java.util.Collections.emptyList(); private void ensureDeletedEntriesIsMutable() { if (!((bitField0_ & 0x00000004) != 0)) { deletedEntries_ = new java.util.ArrayList(deletedEntries_); bitField0_ |= 0x00000004; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> deletedEntriesBuilder_; /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public java.util.List getDeletedEntriesList() { if (deletedEntriesBuilder_ == null) { return java.util.Collections.unmodifiableList(deletedEntries_); } else { return deletedEntriesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public int getDeletedEntriesCount() { if (deletedEntriesBuilder_ == null) { return deletedEntries_.size(); } else { return deletedEntriesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDeletedEntries(int index) { if (deletedEntriesBuilder_ == null) { return deletedEntries_.get(index); } else { return deletedEntriesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder setDeletedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (deletedEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDeletedEntriesIsMutable(); deletedEntries_.set(index, value); onChanged(); } else { deletedEntriesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder setDeletedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (deletedEntriesBuilder_ == null) { ensureDeletedEntriesIsMutable(); deletedEntries_.set(index, builderForValue.build()); onChanged(); } else { deletedEntriesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated 
.hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder addDeletedEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (deletedEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDeletedEntriesIsMutable(); deletedEntries_.add(value); onChanged(); } else { deletedEntriesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder addDeletedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) { if (deletedEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDeletedEntriesIsMutable(); deletedEntries_.add(index, value); onChanged(); } else { deletedEntriesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder addDeletedEntries( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (deletedEntriesBuilder_ == null) { ensureDeletedEntriesIsMutable(); deletedEntries_.add(builderForValue.build()); onChanged(); } else { deletedEntriesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder addDeletedEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) { if (deletedEntriesBuilder_ == null) { ensureDeletedEntriesIsMutable(); deletedEntries_.add(index, builderForValue.build()); onChanged(); } else { deletedEntriesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder addAllDeletedEntries( java.lang.Iterable values) { if 
(deletedEntriesBuilder_ == null) { ensureDeletedEntriesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, deletedEntries_); onChanged(); } else { deletedEntriesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder clearDeletedEntries() { if (deletedEntriesBuilder_ == null) { deletedEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); } else { deletedEntriesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public Builder removeDeletedEntries(int index) { if (deletedEntriesBuilder_ == null) { ensureDeletedEntriesIsMutable(); deletedEntries_.remove(index); onChanged(); } else { deletedEntriesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder getDeletedEntriesBuilder( int index) { return getDeletedEntriesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getDeletedEntriesOrBuilder( int index) { if (deletedEntriesBuilder_ == null) { return deletedEntries_.get(index); } else { return deletedEntriesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public java.util.List getDeletedEntriesOrBuilderList() { if (deletedEntriesBuilder_ != null) { return deletedEntriesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(deletedEntries_); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addDeletedEntriesBuilder() { return getDeletedEntriesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addDeletedEntriesBuilder( int index) { return getDeletedEntriesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3; */ public java.util.List getDeletedEntriesBuilderList() { return getDeletedEntriesFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> getDeletedEntriesFieldBuilder() { if (deletedEntriesBuilder_ == null) { deletedEntriesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder>( deletedEntries_, ((bitField0_ & 0x00000004) != 0), getParentForChildren(), isClean()); deletedEntries_ = null; } return deletedEntriesBuilder_; } private boolean isFromEarlier_ ; /** * required bool isFromEarlier = 4; */ public boolean hasIsFromEarlier() { return ((bitField0_ & 0x00000008) != 0); } /** * required bool isFromEarlier = 
4; */ public boolean getIsFromEarlier() { return isFromEarlier_; } /** * required bool isFromEarlier = 4; */ public Builder setIsFromEarlier(boolean value) { bitField0_ |= 0x00000008; isFromEarlier_ = value; onChanged(); return this; } /** * required bool isFromEarlier = 4; */ public Builder clearIsFromEarlier() { bitField0_ = (bitField0_ & ~0x00000008); isFromEarlier_ = false; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto cursor_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder> cursorBuilder_; /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public boolean hasCursor() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getCursor() { if (cursorBuilder_ == null) { return cursor_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance() : cursor_; } else { return cursorBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public Builder setCursor(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto value) { if (cursorBuilder_ == null) { if (value == null) { throw new NullPointerException(); } cursor_ = value; onChanged(); } else { cursorBuilder_.setMessage(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public Builder setCursor( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder builderForValue) { if (cursorBuilder_ == null) { cursor_ = builderForValue.build(); onChanged(); } else { cursorBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public Builder mergeCursor(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto value) { if (cursorBuilder_ == null) { if (((bitField0_ & 0x00000010) != 0) && cursor_ != null && cursor_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance()) { cursor_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.newBuilder(cursor_).mergeFrom(value).buildPartial(); } else { cursor_ = value; } onChanged(); } else { cursorBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public Builder clearCursor() { if (cursorBuilder_ == null) { cursor_ = null; onChanged(); } else { cursorBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder getCursorBuilder() { bitField0_ |= 0x00000010; onChanged(); return getCursorFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder getCursorOrBuilder() { if (cursorBuilder_ != null) { return cursorBuilder_.getMessageOrBuilder(); } else { return cursor_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance() : cursor_; } } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder> getCursorFieldBuilder() { if (cursorBuilder_ == null) { cursorBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder>( getCursor(), getParentForChildren(), isClean()); cursor_ = null; } return cursorBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportListingProto) } // 
@@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportListingProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotDiffReportListingProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new SnapshotDiffReportListingProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface BlockProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint64 blockId = 1; */ boolean hasBlockId(); /** * required uint64 blockId = 1; */ long getBlockId(); /** * required uint64 genStamp = 2; */ boolean hasGenStamp(); /** * required uint64 genStamp = 2; */ long getGenStamp(); /** * optional uint64 numBytes = 3 [default = 0]; */ boolean hasNumBytes(); /** * optional uint64 numBytes = 3 [default = 0]; */ long getNumBytes(); } /** *
   **
   * Block information.
   *
   * Please be wary of adding additional fields here, since INodeFiles
   * need to fit in PB's default max message size of 64MB.
   * We restrict the max # of blocks per file
   * (dfs.namenode.fs-limits.max-blocks-per-file), but it's better
   * to avoid changing this.
   *
   * Protobuf type {@code hadoop.hdfs.BlockProto}
   *
   * NOTE(review): this is protoc-generated code (file header: DO NOT EDIT);
   * any behavioral change belongs in hdfs.proto, not here. Generic type
   * arguments (e.g. {@code Parser<BlockProto>}) appear to have been stripped
   * from this copy by HTML extraction -- TODO confirm against the original
   * generated source.
   */
  public static final class BlockProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockProto)
      BlockProtoOrBuilder {
    private static final long serialVersionUID = 0L;

    // Use BlockProto.newBuilder() to construct.
    private BlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) {
      super(builder);
    }
    // All three fields are uint64 varints whose Java default (0L) matches the
    // proto defaults, so the no-arg constructor needs no field initialization.
    private BlockProto() {
    }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
    getUnknownFields() {
      return this.unknownFields;
    }

    /**
     * Wire-format parsing constructor: reads tag/value pairs until end of
     * input (tag 0), recording field presence in {@code bitField0_}.
     * Unrecognized tags are preserved in {@code unknownFields} for
     * forward compatibility.
     */
    private BlockProto(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      this();
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      int mutable_bitField0_ = 0;
      org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
          org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 8: {   // field 1 (blockId), wire type 0 (varint)
              bitField0_ |= 0x00000001;
              blockId_ = input.readUInt64();
              break;
            }
            case 16: {  // field 2 (genStamp), wire type 0 (varint)
              bitField0_ |= 0x00000002;
              genStamp_ = input.readUInt64();
              break;
            }
            case 24: {  // field 3 (numBytes), wire type 0 (varint)
              bitField0_ |= 0x00000004;
              numBytes_ = input.readUInt64();
              break;
            }
            default: {
              if (!parseUnknownField(
                  input, unknownFields, extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
            e).setUnfinishedMessage(this);
      } finally {
        // Always freeze what was parsed so far, even on failure, so the
        // partially-parsed message attached to the exception is consistent.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.class,
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder.class);
    }

    // Presence bitmask: bit 0 = blockId, bit 1 = genStamp, bit 2 = numBytes.
    private int bitField0_;
    public static final int BLOCKID_FIELD_NUMBER = 1;
    private long blockId_;
    /**
     * required uint64 blockId = 1;
     */
    public boolean hasBlockId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * required uint64 blockId = 1;
     */
    public long getBlockId() {
      return blockId_;
    }

    public static final int GENSTAMP_FIELD_NUMBER = 2;
    private long genStamp_;
    /**
     * required uint64 genStamp = 2;
     */
    public boolean hasGenStamp() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * required uint64 genStamp = 2;
     */
    public long getGenStamp() {
      return genStamp_;
    }

    public static final int NUMBYTES_FIELD_NUMBER = 3;
    private long numBytes_;
    /**
     * optional uint64 numBytes = 3 [default = 0];
     */
    public boolean hasNumBytes() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * optional uint64 numBytes = 3 [default = 0];
     */
    public long getNumBytes() {
      return numBytes_;
    }

    // Memoized isInitialized() result: -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      // blockId and genStamp are 'required' fields; numBytes is optional.
      if (!hasBlockId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasGenStamp()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    /** Serializes only the fields whose presence bits are set. */
    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt64(1, blockId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, genStamp_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, numBytes_);
      }
      unknownFields.writeTo(output);
    }

    /** Computes (and memoizes in the inherited memoizedSize) the wire size. */
    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(1, blockId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, genStamp_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, numBytes_);
      }
      size += unknownFields.getSerializedSize();
      memoizedSize = size;
      return size;
    }

    /**
     * Field-wise equality: presence bits and values must match for all three
     * fields, and unknown fields must be equal too.
     */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto other =
          (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) obj;

      if (hasBlockId() != other.hasBlockId()) return false;
      if (hasBlockId()) {
        if (getBlockId()
            != other.getBlockId()) return false;
      }
      if (hasGenStamp() != other.hasGenStamp()) return false;
      if (hasGenStamp()) {
        if (getGenStamp()
            != other.getGenStamp()) return false;
      }
      if (hasNumBytes() != other.hasNumBytes()) return false;
      if (hasNumBytes()) {
        if (getNumBytes()
            != other.getNumBytes()) return false;
      }
      if (!unknownFields.equals(other.unknownFields)) return false;
      return true;
    }

    /** Memoized hash over descriptor, present fields, and unknown fields. */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBlockId()) {
        hash = (37 * hash) + BLOCKID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getBlockId());
      }
      if (hasGenStamp()) {
        hash = (37 * hash) + GENSTAMP_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getGenStamp());
      }
      if (hasNumBytes()) {
        hash = (37 * hash) + NUMBYTES_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNumBytes());
      }
      hash = (29 * hash) + unknownFields.hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    // --- Standard generated parseFrom overloads; all delegate to PARSER. ---

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() {
      return newBuilder();
    }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Block information.
     *
     * Please be wary of adding additional fields here, since INodeFiles
     * need to fit in PB's default max message size of 64MB.
     * We restrict the max # of blocks per file
     * (dfs.namenode.fs-limits.max-blocks-per-file), but it's better
     * to avoid changing this.
     *
     * Protobuf type {@code hadoop.hdfs.BlockProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.class,
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
        }
      }
      /** Resets every field to its default and clears all presence bits. */
      @java.lang.Override
      public Builder clear() {
        super.clear();
        blockId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000001);
        genStamp_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        numBytes_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
      }

      /** Builds, throwing if a 'required' field (blockId, genStamp) is unset. */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      /** Builds without the required-field check; copies set fields + bits. */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result =
            new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.blockId_ = blockId_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.genStamp_ = genStamp_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.numBytes_ = numBytes_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      /** Field-by-field merge: only fields set on {@code other} overwrite. */
      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) return this;
        if (other.hasBlockId()) {
          setBlockId(other.getBlockId());
        }
        if (other.hasGenStamp()) {
          setGenStamp(other.getGenStamp());
        }
        if (other.hasNumBytes()) {
          setNumBytes(other.getNumBytes());
        }
        this.mergeUnknownFields(other.unknownFields);
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasBlockId()) {
          return false;
        }
        if (!hasGenStamp()) {
          return false;
        }
        return true;
      }

      /**
       * Stream merge: parses via PARSER, then merges. On a parse error the
       * partially-parsed message is still merged (finally block) before the
       * underlying IOException is rethrown.
       */
      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) e.getUnfinishedMessage();
          throw e.unwrapIOException();
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      private long blockId_ ;
      /**
       * required uint64 blockId = 1;
       */
      public boolean hasBlockId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * required uint64 blockId = 1;
       */
      public long getBlockId() {
        return blockId_;
      }
      /**
       * required uint64 blockId = 1;
       */
      public Builder setBlockId(long value) {
        bitField0_ |= 0x00000001;
        blockId_ = value;
        onChanged();
        return this;
      }
      /**
       * required uint64 blockId = 1;
       */
      public Builder clearBlockId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        blockId_ = 0L;
        onChanged();
        return this;
      }

      private long genStamp_ ;
      /**
       * required uint64 genStamp = 2;
       */
      public boolean hasGenStamp() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * required uint64 genStamp = 2;
       */
      public long getGenStamp() {
        return genStamp_;
      }
      /**
       * required uint64 genStamp = 2;
       */
      public Builder setGenStamp(long value) {
        bitField0_ |= 0x00000002;
        genStamp_ = value;
        onChanged();
        return this;
      }
      /**
       * required uint64 genStamp = 2;
       */
      public Builder clearGenStamp() {
        bitField0_ = (bitField0_ & ~0x00000002);
        genStamp_ = 0L;
        onChanged();
        return this;
      }

      private long numBytes_ ;
      /**
       * optional uint64 numBytes = 3 [default = 0];
       */
      public boolean hasNumBytes() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * optional uint64 numBytes = 3 [default = 0];
       */
      public long getNumBytes() {
        return numBytes_;
      }
      /**
       * optional uint64 numBytes = 3 [default = 0];
       */
      public Builder setNumBytes(long value) {
        bitField0_ |= 0x00000004;
        numBytes_ = value;
        onChanged();
        return this;
      }
      /**
       * optional uint64 numBytes = 3 [default = 0];
       */
      public Builder clearNumBytes() {
        bitField0_ = (bitField0_ & ~0x00000004);
        numBytes_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    // Deprecated in favor of parser(); kept public for old generated callers.
    // NOTE(review): generic type arguments (Parser<BlockProto>,
    // AbstractParser<BlockProto>) appear stripped in this copy by HTML
    // extraction -- TODO confirm against the generated original.
    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() {
      @java.lang.Override
      public BlockProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return new BlockProto(input, extensionRegistry);
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  /**
   * Read-only accessor view shared by {@code SnapshotInfoProto} and its
   * Builder; one has/get pair per field (plus raw-bytes accessors for
   * string fields).
   */
  public interface SnapshotInfoProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotInfoProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * required string snapshotName = 1;
     */
    boolean hasSnapshotName();
    /**
     * required string snapshotName = 1;
     */
    java.lang.String getSnapshotName();
    /**
     * required string snapshotName = 1;
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getSnapshotNameBytes();

    /**
     * required string snapshotRoot = 2;
     */
    boolean hasSnapshotRoot();
    /**
     * required string snapshotRoot = 2;
     */
    java.lang.String getSnapshotRoot();
    /**
     * required string snapshotRoot = 2;
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getSnapshotRootBytes();

    /**
     * required .hadoop.hdfs.FsPermissionProto permission = 3;
     */
    boolean hasPermission();
    /**
     * required .hadoop.hdfs.FsPermissionProto permission = 3;
     */
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission();
    /**
     * required .hadoop.hdfs.FsPermissionProto permission = 3;
     */
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder();

    /**
     * required string owner = 4;
     */
    boolean hasOwner();
    /**
     * required string owner = 4;
     */
    java.lang.String getOwner();
    /**
     * required string owner = 4;
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getOwnerBytes();

    /**
     * required string group = 5;
     */
    boolean hasGroup();
    /**
     * required string group = 5;
     */
    java.lang.String getGroup();
    /**
     * required string group = 5;
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getGroupBytes();

    /**
     * TODO: do we need access time?
     *
     * required string createTime = 6;
     */
    boolean hasCreateTime();
    /**
     * TODO: do we need access time?
     *
     * required string createTime = 6;
     */
    java.lang.String getCreateTime();
    /**
     * TODO: do we need access time?
     *
     * required string createTime = 6;
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getCreateTimeBytes();
  }
  /**
   *
   **
   * Information related to a snapshot
   * TODO: add more information
   * 
* * Protobuf type {@code hadoop.hdfs.SnapshotInfoProto} */ public static final class SnapshotInfoProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotInfoProto) SnapshotInfoProtoOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotInfoProto.newBuilder() to construct. private SnapshotInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotInfoProto() { snapshotName_ = ""; snapshotRoot_ = ""; owner_ = ""; group_ = ""; createTime_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshotInfoProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; snapshotName_ = bs; break; } case 18: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; snapshotRoot_ = bs; break; } case 26: { org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) != 0)) { subBuilder = permission_.toBuilder(); } permission_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.PARSER, extensionRegistry); if (subBuilder != null) { 
subBuilder.mergeFrom(permission_); permission_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000004; break; } case 34: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000008; owner_ = bs; break; } case 42: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000010; group_ = bs; break; } case 50: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000020; createTime_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.Builder.class); } private int bitField0_; public static final int SNAPSHOTNAME_FIELD_NUMBER = 1; private volatile java.lang.Object snapshotName_; /** * required string snapshotName = 1; */ public boolean hasSnapshotName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string snapshotName = 1; */ public java.lang.String getSnapshotName() { java.lang.Object 
ref = snapshotName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotName_ = s; } return s; } } /** * required string snapshotName = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotNameBytes() { java.lang.Object ref = snapshotName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int SNAPSHOTROOT_FIELD_NUMBER = 2; private volatile java.lang.Object snapshotRoot_; /** * required string snapshotRoot = 2; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000002) != 0); } /** * required string snapshotRoot = 2; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } } /** * required string snapshotRoot = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int PERMISSION_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_; /** * required 
.hadoop.hdfs.FsPermissionProto permission = 3; */ public boolean hasPermission() { return ((bitField0_ & 0x00000004) != 0); } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() { return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } public static final int OWNER_FIELD_NUMBER = 4; private volatile java.lang.Object owner_; /** * required string owner = 4; */ public boolean hasOwner() { return ((bitField0_ & 0x00000008) != 0); } /** * required string owner = 4; */ public java.lang.String getOwner() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { owner_ = s; } return s; } } /** * required string owner = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int GROUP_FIELD_NUMBER = 5; private volatile java.lang.Object group_; /** * required string group = 5; */ public boolean hasGroup() { return ((bitField0_ & 0x00000010) != 0); } /** * required string group = 5; */ public java.lang.String getGroup() { 
java.lang.Object ref = group_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { group_ = s; } return s; } } /** * required string group = 5; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getGroupBytes() { java.lang.Object ref = group_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); group_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int CREATETIME_FIELD_NUMBER = 6; private volatile java.lang.Object createTime_; /** *
     * TODO: do we need access time?
     * 
* * required string createTime = 6; */ public boolean hasCreateTime() { return ((bitField0_ & 0x00000020) != 0); } /** *
     * TODO: do we need access time?
     * 
* * required string createTime = 6; */ public java.lang.String getCreateTime() { java.lang.Object ref = createTime_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { createTime_ = s; } return s; } } /** *
     * TODO: do we need access time?
     * 
* * required string createTime = 6; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getCreateTimeBytes() { java.lang.Object ref = createTime_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); createTime_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSnapshotName()) { memoizedIsInitialized = 0; return false; } if (!hasSnapshotRoot()) { memoizedIsInitialized = 0; return false; } if (!hasPermission()) { memoizedIsInitialized = 0; return false; } if (!hasOwner()) { memoizedIsInitialized = 0; return false; } if (!hasGroup()) { memoizedIsInitialized = 0; return false; } if (!hasCreateTime()) { memoizedIsInitialized = 0; return false; } if (!getPermission().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, snapshotName_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, snapshotRoot_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeMessage(3, getPermission()); } if (((bitField0_ & 0x00000008) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 4, owner_); } if (((bitField0_ & 0x00000010) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, group_); } if (((bitField0_ & 0x00000020) != 0)) { 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, createTime_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, snapshotName_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, snapshotRoot_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(3, getPermission()); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(4, owner_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, group_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, createTime_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto) obj; if (hasSnapshotName() != other.hasSnapshotName()) return false; if (hasSnapshotName()) { if (!getSnapshotName() .equals(other.getSnapshotName())) return false; } if (hasSnapshotRoot() != other.hasSnapshotRoot()) return false; if (hasSnapshotRoot()) { if (!getSnapshotRoot() .equals(other.getSnapshotRoot())) return false; } if (hasPermission() != other.hasPermission()) return false; if (hasPermission()) { if (!getPermission() 
.equals(other.getPermission())) return false; } if (hasOwner() != other.hasOwner()) return false; if (hasOwner()) { if (!getOwner() .equals(other.getOwner())) return false; } if (hasGroup() != other.hasGroup()) return false; if (hasGroup()) { if (!getGroup() .equals(other.getGroup())) return false; } if (hasCreateTime() != other.hasCreateTime()) return false; if (hasCreateTime()) { if (!getCreateTime() .equals(other.getCreateTime())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSnapshotName()) { hash = (37 * hash) + SNAPSHOTNAME_FIELD_NUMBER; hash = (53 * hash) + getSnapshotName().hashCode(); } if (hasSnapshotRoot()) { hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshotRoot().hashCode(); } if (hasPermission()) { hash = (37 * hash) + PERMISSION_FIELD_NUMBER; hash = (53 * hash) + getPermission().hashCode(); } if (hasOwner()) { hash = (37 * hash) + OWNER_FIELD_NUMBER; hash = (53 * hash) + getOwner().hashCode(); } if (hasGroup()) { hash = (37 * hash) + GROUP_FIELD_NUMBER; hash = (53 * hash) + getGroup().hashCode(); } if (hasCreateTime()) { hash = (37 * hash) + CREATETIME_FIELD_NUMBER; hash = (53 * hash) + getCreateTime().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Information related to a snapshot
     * TODO: add more information
     * 
* * Protobuf type {@code hadoop.hdfs.SnapshotInfoProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotInfoProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getPermissionFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); snapshotName_ = ""; bitField0_ = (bitField0_ & ~0x00000001); snapshotRoot_ = ""; bitField0_ = (bitField0_ & ~0x00000002); if (permissionBuilder_ == null) { permission_ = null; } else { permissionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); owner_ = ""; bitField0_ = (bitField0_ & ~0x00000008); group_ = ""; bitField0_ = (bitField0_ & ~0x00000010); createTime_ = ""; bitField0_ = (bitField0_ & ~0x00000020); return this; } @java.lang.Override 
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.snapshotName_ = snapshotName_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.snapshotRoot_ = snapshotRoot_; if (((from_bitField0_ & 0x00000004) != 0)) { if (permissionBuilder_ == null) { result.permission_ = permission_; } else { result.permission_ = permissionBuilder_.build(); } to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { to_bitField0_ |= 0x00000008; } result.owner_ = owner_; if (((from_bitField0_ & 0x00000010) != 0)) { to_bitField0_ |= 0x00000010; } result.group_ = group_; if (((from_bitField0_ & 0x00000020) != 0)) { to_bitField0_ |= 0x00000020; } result.createTime_ = createTime_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.getDefaultInstance()) return this; if (other.hasSnapshotName()) { bitField0_ |= 0x00000001; snapshotName_ = other.snapshotName_; onChanged(); } if (other.hasSnapshotRoot()) { bitField0_ |= 0x00000002; snapshotRoot_ = other.snapshotRoot_; onChanged(); } if (other.hasPermission()) { mergePermission(other.getPermission()); } if (other.hasOwner()) { bitField0_ |= 0x00000008; owner_ = other.owner_; onChanged(); } if (other.hasGroup()) { bitField0_ |= 0x00000010; group_ = other.group_; onChanged(); } if (other.hasCreateTime()) { bitField0_ |= 0x00000020; createTime_ = other.createTime_; onChanged(); } 
this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasSnapshotName()) { return false; } if (!hasSnapshotRoot()) { return false; } if (!hasPermission()) { return false; } if (!hasOwner()) { return false; } if (!hasGroup()) { return false; } if (!hasCreateTime()) { return false; } if (!getPermission().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object snapshotName_ = ""; /** * required string snapshotName = 1; */ public boolean hasSnapshotName() { return ((bitField0_ & 0x00000001) != 0); } /** * required string snapshotName = 1; */ public java.lang.String getSnapshotName() { java.lang.Object ref = snapshotName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string snapshotName = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotNameBytes() { java.lang.Object ref = snapshotName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = 
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string snapshotName = 1; */ public Builder setSnapshotName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotName_ = value; onChanged(); return this; } /** * required string snapshotName = 1; */ public Builder clearSnapshotName() { bitField0_ = (bitField0_ & ~0x00000001); snapshotName_ = getDefaultInstance().getSnapshotName(); onChanged(); return this; } /** * required string snapshotName = 1; */ public Builder setSnapshotNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotName_ = value; onChanged(); return this; } private java.lang.Object snapshotRoot_ = ""; /** * required string snapshotRoot = 2; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000002) != 0); } /** * required string snapshotRoot = 2; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string snapshotRoot = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string snapshotRoot = 2; */ public Builder setSnapshotRoot( 
java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; snapshotRoot_ = value; onChanged(); return this; } /** * required string snapshotRoot = 2; */ public Builder clearSnapshotRoot() { bitField0_ = (bitField0_ & ~0x00000002); snapshotRoot_ = getDefaultInstance().getSnapshotRoot(); onChanged(); return this; } /** * required string snapshotRoot = 2; */ public Builder setSnapshotRootBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; snapshotRoot_ = value; onChanged(); return this; } private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> permissionBuilder_; /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public boolean hasPermission() { return ((bitField0_ & 0x00000004) != 0); } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() { if (permissionBuilder_ == null) { return permission_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } else { return permissionBuilder_.getMessage(); } } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (permissionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } permission_ = value; onChanged(); } else { permissionBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public Builder setPermission( org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) { if (permissionBuilder_ == null) { permission_ = builderForValue.build(); onChanged(); } else { permissionBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (permissionBuilder_ == null) { if (((bitField0_ & 0x00000004) != 0) && permission_ != null && permission_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) { permission_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.newBuilder(permission_).mergeFrom(value).buildPartial(); } else { permission_ = value; } onChanged(); } else { permissionBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public Builder clearPermission() { if (permissionBuilder_ == null) { permission_ = null; onChanged(); } else { permissionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getPermissionBuilder() { 
bitField0_ |= 0x00000004; onChanged(); return getPermissionFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { if (permissionBuilder_ != null) { return permissionBuilder_.getMessageOrBuilder(); } else { return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_; } } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> getPermissionFieldBuilder() { if (permissionBuilder_ == null) { permissionBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>( getPermission(), getParentForChildren(), isClean()); permission_ = null; } return permissionBuilder_; } private java.lang.Object owner_ = ""; /** * required string owner = 4; */ public boolean hasOwner() { return ((bitField0_ & 0x00000008) != 0); } /** * required string owner = 4; */ public java.lang.String getOwner() { java.lang.Object ref = owner_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { owner_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string owner = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes() { java.lang.Object ref = owner_; if (ref 
instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string owner = 4; */ public Builder setOwner( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; owner_ = value; onChanged(); return this; } /** * required string owner = 4; */ public Builder clearOwner() { bitField0_ = (bitField0_ & ~0x00000008); owner_ = getDefaultInstance().getOwner(); onChanged(); return this; } /** * required string owner = 4; */ public Builder setOwnerBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; owner_ = value; onChanged(); return this; } private java.lang.Object group_ = ""; /** * required string group = 5; */ public boolean hasGroup() { return ((bitField0_ & 0x00000010) != 0); } /** * required string group = 5; */ public java.lang.String getGroup() { java.lang.Object ref = group_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { group_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string group = 5; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getGroupBytes() { java.lang.Object ref = group_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); group_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string group = 5; */ public Builder setGroup( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ 
|= 0x00000010; group_ = value; onChanged(); return this; } /** * required string group = 5; */ public Builder clearGroup() { bitField0_ = (bitField0_ & ~0x00000010); group_ = getDefaultInstance().getGroup(); onChanged(); return this; } /** * required string group = 5; */ public Builder setGroupBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; group_ = value; onChanged(); return this; } private java.lang.Object createTime_ = ""; /** *
       * TODO: do we need access time?
       * 
* * required string createTime = 6; */ public boolean hasCreateTime() { return ((bitField0_ & 0x00000020) != 0); } /** *
       * TODO: do we need access time?
       * 
* * required string createTime = 6; */ public java.lang.String getCreateTime() { java.lang.Object ref = createTime_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { createTime_ = s; } return s; } else { return (java.lang.String) ref; } } /** *
       * TODO: do we need access time?
       * 
* * required string createTime = 6; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getCreateTimeBytes() { java.lang.Object ref = createTime_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); createTime_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** *
       * TODO: do we need access time?
       * 
* * required string createTime = 6; */ public Builder setCreateTime( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; createTime_ = value; onChanged(); return this; } /** *
       * TODO: do we need access time?
       * 
* * required string createTime = 6; */ public Builder clearCreateTime() { bitField0_ = (bitField0_ & ~0x00000020); createTime_ = getDefaultInstance().getCreateTime(); onChanged(); return this; } /** *
       * TODO: do we need access time?
       * 
* * required string createTime = 6; */ public Builder setCreateTimeBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; createTime_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotInfoProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotInfoProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotInfoProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new SnapshotInfoProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface 
RollingUpgradeStatusProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.RollingUpgradeStatusProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string blockPoolId = 1; */ boolean hasBlockPoolId(); /** * required string blockPoolId = 1; */ java.lang.String getBlockPoolId(); /** * required string blockPoolId = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes(); /** * optional bool finalized = 2 [default = false]; */ boolean hasFinalized(); /** * optional bool finalized = 2 [default = false]; */ boolean getFinalized(); } /** *
   **
   * Rolling upgrade status
   * 
* * Protobuf type {@code hadoop.hdfs.RollingUpgradeStatusProto} */ public static final class RollingUpgradeStatusProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.RollingUpgradeStatusProto) RollingUpgradeStatusProtoOrBuilder { private static final long serialVersionUID = 0L; // Use RollingUpgradeStatusProto.newBuilder() to construct. private RollingUpgradeStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private RollingUpgradeStatusProto() { blockPoolId_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RollingUpgradeStatusProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; blockPoolId_ = bs; break; } case 16: { bitField0_ |= 0x00000002; finalized_ = input.readBool(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { 
this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder.class); } private int bitField0_; public static final int BLOCKPOOLID_FIELD_NUMBER = 1; private volatile java.lang.Object blockPoolId_; /** * required string blockPoolId = 1; */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000001) != 0); } /** * required string blockPoolId = 1; */ public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } } /** * required string blockPoolId = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int FINALIZED_FIELD_NUMBER = 2; private boolean finalized_; /** * optional bool finalized = 2 [default = 
false]; */ public boolean hasFinalized() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bool finalized = 2 [default = false]; */ public boolean getFinalized() { return finalized_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasBlockPoolId()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, blockPoolId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBool(2, finalized_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, blockPoolId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(2, finalized_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto) obj; if (hasBlockPoolId() != other.hasBlockPoolId()) return false; if (hasBlockPoolId()) { if (!getBlockPoolId() .equals(other.getBlockPoolId())) return false; } if (hasFinalized() != other.hasFinalized()) return false; if 
(hasFinalized()) { if (getFinalized() != other.getFinalized()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasBlockPoolId()) { hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; hash = (53 * hash) + getBlockPoolId().hashCode(); } if (hasFinalized()) { hash = (37 * hash) + FINALIZED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getFinalized()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto 
parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
.parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Rolling upgrade status
     * 
* * Protobuf type {@code hadoop.hdfs.RollingUpgradeStatusProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.RollingUpgradeStatusProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); blockPoolId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); finalized_ = false; bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor; } @java.lang.Override public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.blockPoolId_ = blockPoolId_; if (((from_bitField0_ & 0x00000002) != 0)) { result.finalized_ = finalized_; to_bitField0_ |= 0x00000002; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance()) return this; if (other.hasBlockPoolId()) { bitField0_ |= 0x00000001; blockPoolId_ = other.blockPoolId_; onChanged(); } if (other.hasFinalized()) { setFinalized(other.getFinalized()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasBlockPoolId()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object blockPoolId_ = ""; /** * required string blockPoolId = 1; */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000001) != 0); } /** * required 
string blockPoolId = 1; */ public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string blockPoolId = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string blockPoolId = 1; */ public Builder setBlockPoolId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; blockPoolId_ = value; onChanged(); return this; } /** * required string blockPoolId = 1; */ public Builder clearBlockPoolId() { bitField0_ = (bitField0_ & ~0x00000001); blockPoolId_ = getDefaultInstance().getBlockPoolId(); onChanged(); return this; } /** * required string blockPoolId = 1; */ public Builder setBlockPoolIdBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; blockPoolId_ = value; onChanged(); return this; } private boolean finalized_ ; /** * optional bool finalized = 2 [default = false]; */ public boolean hasFinalized() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bool finalized = 2 [default = false]; */ public boolean getFinalized() { return finalized_; } /** * optional bool finalized = 2 [default = false]; */ public Builder setFinalized(boolean value) { bitField0_ |= 0x00000002; finalized_ = value; onChanged(); return this; } /** * optional bool 
finalized = 2 [default = false]; */ public Builder clearFinalized() { bitField0_ = (bitField0_ & ~0x00000002); finalized_ = false; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RollingUpgradeStatusProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RollingUpgradeStatusProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public RollingUpgradeStatusProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new RollingUpgradeStatusProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface StorageUuidsProtoOrBuilder extends // 
@@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageUuidsProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated string storageUuids = 1; */ java.util.List getStorageUuidsList(); /** * repeated string storageUuids = 1; */ int getStorageUuidsCount(); /** * repeated string storageUuids = 1; */ java.lang.String getStorageUuids(int index); /** * repeated string storageUuids = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidsBytes(int index); } /** *
   **
   * A list of storage IDs.
   * 
* * Protobuf type {@code hadoop.hdfs.StorageUuidsProto} */ public static final class StorageUuidsProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageUuidsProto) StorageUuidsProtoOrBuilder { private static final long serialVersionUID = 0L; // Use StorageUuidsProto.newBuilder() to construct. private StorageUuidsProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private StorageUuidsProto() { storageUuids_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StorageUuidsProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); if (!((mutable_bitField0_ & 0x00000001) != 0)) { storageUuids_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000001; } storageUuids_.add(bs); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { storageUuids_ = storageUuids_.getUnmodifiableView(); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder.class); } public static final int STORAGEUUIDS_FIELD_NUMBER = 1; private org.apache.hadoop.thirdparty.protobuf.LazyStringList storageUuids_; /** * repeated string storageUuids = 1; */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getStorageUuidsList() { return storageUuids_; } /** * repeated string storageUuids = 1; */ public int getStorageUuidsCount() { return storageUuids_.size(); } /** * repeated string storageUuids = 1; */ public java.lang.String getStorageUuids(int index) { return storageUuids_.get(index); } /** * repeated string storageUuids = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidsBytes(int index) { return storageUuids_.getByteString(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void 
writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < storageUuids_.size(); i++) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, storageUuids_.getRaw(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; { int dataSize = 0; for (int i = 0; i < storageUuids_.size(); i++) { dataSize += computeStringSizeNoTag(storageUuids_.getRaw(i)); } size += dataSize; size += 1 * getStorageUuidsList().size(); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) obj; if (!getStorageUuidsList() .equals(other.getStorageUuidsList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getStorageUuidsCount() > 0) { hash = (37 * hash) + STORAGEUUIDS_FIELD_NUMBER; hash = (53 * hash) + getStorageUuidsList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( java.nio.ByteBuffer data, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * A list of storage IDs.
     * 
* * Protobuf type {@code hadoop.hdfs.StorageUuidsProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageUuidsProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); storageUuids_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getDefaultInstanceForType() { 
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto(this); int from_bitField0_ = bitField0_; if (((bitField0_ & 0x00000001) != 0)) { storageUuids_ = storageUuids_.getUnmodifiableView(); bitField0_ = (bitField0_ & ~0x00000001); } result.storageUuids_ = storageUuids_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.getDefaultInstance()) return this; if (!other.storageUuids_.isEmpty()) { if (storageUuids_.isEmpty()) { storageUuids_ = other.storageUuids_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureStorageUuidsIsMutable(); storageUuids_.addAll(other.storageUuids_); } onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.LazyStringList storageUuids_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; private void ensureStorageUuidsIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { storageUuids_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(storageUuids_); bitField0_ |= 0x00000001; } } /** * repeated string storageUuids = 1; */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList 
getStorageUuidsList() { return storageUuids_.getUnmodifiableView(); } /** * repeated string storageUuids = 1; */ public int getStorageUuidsCount() { return storageUuids_.size(); } /** * repeated string storageUuids = 1; */ public java.lang.String getStorageUuids(int index) { return storageUuids_.get(index); } /** * repeated string storageUuids = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageUuidsBytes(int index) { return storageUuids_.getByteString(index); } /** * repeated string storageUuids = 1; */ public Builder setStorageUuids( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageUuidsIsMutable(); storageUuids_.set(index, value); onChanged(); return this; } /** * repeated string storageUuids = 1; */ public Builder addStorageUuids( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageUuidsIsMutable(); storageUuids_.add(value); onChanged(); return this; } /** * repeated string storageUuids = 1; */ public Builder addAllStorageUuids( java.lang.Iterable values) { ensureStorageUuidsIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, storageUuids_); onChanged(); return this; } /** * repeated string storageUuids = 1; */ public Builder clearStorageUuids() { storageUuids_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * repeated string storageUuids = 1; */ public Builder addStorageUuidsBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureStorageUuidsIsMutable(); storageUuids_.add(value); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder 
mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageUuidsProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageUuidsProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public StorageUuidsProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new StorageUuidsProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface BlockTokenSecretProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockTokenSecretProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint64 expiryDate = 1; */ boolean hasExpiryDate(); /** * optional uint64 expiryDate = 1; */ long getExpiryDate(); /** * optional uint32 keyId = 2; */ boolean hasKeyId(); /** * optional uint32 keyId = 2; */ int getKeyId(); /** * optional string userId = 3; */ boolean hasUserId(); /** * optional string 
userId = 3; */ java.lang.String getUserId(); /** * optional string userId = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getUserIdBytes(); /** * optional string blockPoolId = 4; */ boolean hasBlockPoolId(); /** * optional string blockPoolId = 4; */ java.lang.String getBlockPoolId(); /** * optional string blockPoolId = 4; */ org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes(); /** * optional uint64 blockId = 5; */ boolean hasBlockId(); /** * optional uint64 blockId = 5; */ long getBlockId(); /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; */ java.util.List getModesList(); /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; */ int getModesCount(); /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto getModes(int index); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ java.util.List getStorageTypesList(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ int getStorageTypesCount(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index); /** * repeated string storageIds = 8; */ java.util.List getStorageIdsList(); /** * repeated string storageIds = 8; */ int getStorageIdsCount(); /** * repeated string storageIds = 8; */ java.lang.String getStorageIds(int index); /** * repeated string storageIds = 8; */ org.apache.hadoop.thirdparty.protobuf.ByteString getStorageIdsBytes(int index); /** * optional bytes handshakeSecret = 9; */ boolean hasHandshakeSecret(); /** * optional bytes handshakeSecret = 9; */ org.apache.hadoop.thirdparty.protobuf.ByteString getHandshakeSecret(); } /** *
   **
   * Secret information for the BlockKeyProto. This is not sent on the wire as
   * such but is used to pack a byte array and encrypted and put in
   * BlockKeyProto.bytes
   * When adding further fields, make sure they are optional as they would
   * otherwise not be backwards compatible.
   * Note: As part of the migration from WritableUtils based tokens (aka "legacy")
   * to Protocol Buffers, we use the first byte to determine the type. If the
   * first byte is <=0 then it is a legacy token. This means that when using
   * protobuf tokens, the first field sent must have a `field_number` less
   * than 16 to make sure that the first byte is positive. Otherwise it could be
   * parsed as a legacy token. See HDFS-11026 for more discussion.
   * 
* * Protobuf type {@code hadoop.hdfs.BlockTokenSecretProto} */ public static final class BlockTokenSecretProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockTokenSecretProto) BlockTokenSecretProtoOrBuilder { private static final long serialVersionUID = 0L; // Use BlockTokenSecretProto.newBuilder() to construct. private BlockTokenSecretProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private BlockTokenSecretProto() { userId_ = ""; blockPoolId_ = ""; modes_ = java.util.Collections.emptyList(); storageTypes_ = java.util.Collections.emptyList(); storageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; handshakeSecret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private BlockTokenSecretProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; expiryDate_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; keyId_ = input.readUInt32(); break; } case 26: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000004; userId_ = bs; break; } case 34: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 
0x00000008; blockPoolId_ = bs; break; } case 40: { bitField0_ |= 0x00000010; blockId_ = input.readUInt64(); break; } case 48: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(6, rawValue); } else { if (!((mutable_bitField0_ & 0x00000020) != 0)) { modes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000020; } modes_.add(rawValue); } break; } case 50: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(6, rawValue); } else { if (!((mutable_bitField0_ & 0x00000020) != 0)) { modes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000020; } modes_.add(rawValue); } } input.popLimit(oldLimit); break; } case 56: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(7, rawValue); } else { if (!((mutable_bitField0_ & 0x00000040) != 0)) { storageTypes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000040; } storageTypes_.add(rawValue); } break; } case 58: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(7, rawValue); } else { if (!((mutable_bitField0_ & 0x00000040) != 0)) { storageTypes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000040; } storageTypes_.add(rawValue); } } input.popLimit(oldLimit); break; } case 66: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); if (!((mutable_bitField0_ & 0x00000080) != 0)) { storageIds_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000080; } storageIds_.add(bs); break; } case 74: { bitField0_ |= 0x00000020; handshakeSecret_ = input.readBytes(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000020) != 0)) { modes_ = java.util.Collections.unmodifiableList(modes_); } if (((mutable_bitField0_ & 0x00000040) != 0)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); } if (((mutable_bitField0_ & 0x00000080) != 0)) { storageIds_ = storageIds_.getUnmodifiableView(); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_fieldAccessorTable 
.ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.Builder.class); } private int bitField0_; public static final int EXPIRYDATE_FIELD_NUMBER = 1; private long expiryDate_; /** * optional uint64 expiryDate = 1; */ public boolean hasExpiryDate() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 expiryDate = 1; */ public long getExpiryDate() { return expiryDate_; } public static final int KEYID_FIELD_NUMBER = 2; private int keyId_; /** * optional uint32 keyId = 2; */ public boolean hasKeyId() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint32 keyId = 2; */ public int getKeyId() { return keyId_; } public static final int USERID_FIELD_NUMBER = 3; private volatile java.lang.Object userId_; /** * optional string userId = 3; */ public boolean hasUserId() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string userId = 3; */ public java.lang.String getUserId() { java.lang.Object ref = userId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { userId_ = s; } return s; } } /** * optional string userId = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getUserIdBytes() { java.lang.Object ref = userId_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); userId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int BLOCKPOOLID_FIELD_NUMBER = 4; private volatile java.lang.Object blockPoolId_; /** * optional string blockPoolId = 4; */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000008) != 0); } 
/** * optional string blockPoolId = 4; */ public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } } /** * optional string blockPoolId = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int BLOCKID_FIELD_NUMBER = 5; private long blockId_; /** * optional uint64 blockId = 5; */ public boolean hasBlockId() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 blockId = 5; */ public long getBlockId() { return blockId_; } public static final int MODES_FIELD_NUMBER = 6; private java.util.List modes_; private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto> modes_converter_ = new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto>() { public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto convert(java.lang.Integer from) { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto.valueOf(from); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto.READ : result; } }; /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; */ public java.util.List getModesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto>(modes_, modes_converter_); } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; */ public int getModesCount() { return modes_.size(); } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto getModes(int index) { return modes_converter_.convert(modes_.get(index)); } public static final int STORAGETYPES_FIELD_NUMBER = 7; private java.util.List storageTypes_; private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_converter_ = new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() { public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(from); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } }; /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public java.util.List getStorageTypesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_converter_.convert(storageTypes_.get(index)); } public static final int STORAGEIDS_FIELD_NUMBER = 8; private org.apache.hadoop.thirdparty.protobuf.LazyStringList storageIds_; /** * repeated string storageIds = 8; */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getStorageIdsList() { return storageIds_; } /** * repeated string storageIds = 8; */ public int getStorageIdsCount() { return storageIds_.size(); } /** * repeated string storageIds = 8; */ public java.lang.String getStorageIds(int index) { return storageIds_.get(index); } /** * repeated string storageIds = 8; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStorageIdsBytes(int index) { return storageIds_.getByteString(index); } public static final int HANDSHAKESECRET_FIELD_NUMBER = 9; private org.apache.hadoop.thirdparty.protobuf.ByteString handshakeSecret_; /** * optional bytes handshakeSecret = 9; */ public boolean hasHandshakeSecret() { return ((bitField0_ & 0x00000020) != 0); } /** * optional bytes handshakeSecret = 9; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getHandshakeSecret() { return handshakeSecret_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; 
if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, expiryDate_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, keyId_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, userId_); } if (((bitField0_ & 0x00000008) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 4, blockPoolId_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, blockId_); } for (int i = 0; i < modes_.size(); i++) { output.writeEnum(6, modes_.get(i)); } for (int i = 0; i < storageTypes_.size(); i++) { output.writeEnum(7, storageTypes_.get(i)); } for (int i = 0; i < storageIds_.size(); i++) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 8, storageIds_.getRaw(i)); } if (((bitField0_ & 0x00000020) != 0)) { output.writeBytes(9, handshakeSecret_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, expiryDate_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, keyId_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, userId_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(4, blockPoolId_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, 
blockId_); } { int dataSize = 0; for (int i = 0; i < modes_.size(); i++) { dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSizeNoTag(modes_.get(i)); } size += dataSize; size += 1 * modes_.size(); } { int dataSize = 0; for (int i = 0; i < storageTypes_.size(); i++) { dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSizeNoTag(storageTypes_.get(i)); } size += dataSize; size += 1 * storageTypes_.size(); } { int dataSize = 0; for (int i = 0; i < storageIds_.size(); i++) { dataSize += computeStringSizeNoTag(storageIds_.getRaw(i)); } size += dataSize; size += 1 * getStorageIdsList().size(); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(9, handshakeSecret_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto) obj; if (hasExpiryDate() != other.hasExpiryDate()) return false; if (hasExpiryDate()) { if (getExpiryDate() != other.getExpiryDate()) return false; } if (hasKeyId() != other.hasKeyId()) return false; if (hasKeyId()) { if (getKeyId() != other.getKeyId()) return false; } if (hasUserId() != other.hasUserId()) return false; if (hasUserId()) { if (!getUserId() .equals(other.getUserId())) return false; } if (hasBlockPoolId() != other.hasBlockPoolId()) return false; if (hasBlockPoolId()) { if (!getBlockPoolId() .equals(other.getBlockPoolId())) return false; } if (hasBlockId() != other.hasBlockId()) return false; if (hasBlockId()) { if (getBlockId() != other.getBlockId()) return false; } if (!modes_.equals(other.modes_)) return false; 
if (!storageTypes_.equals(other.storageTypes_)) return false; if (!getStorageIdsList() .equals(other.getStorageIdsList())) return false; if (hasHandshakeSecret() != other.hasHandshakeSecret()) return false; if (hasHandshakeSecret()) { if (!getHandshakeSecret() .equals(other.getHandshakeSecret())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasExpiryDate()) { hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getExpiryDate()); } if (hasKeyId()) { hash = (37 * hash) + KEYID_FIELD_NUMBER; hash = (53 * hash) + getKeyId(); } if (hasUserId()) { hash = (37 * hash) + USERID_FIELD_NUMBER; hash = (53 * hash) + getUserId().hashCode(); } if (hasBlockPoolId()) { hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; hash = (53 * hash) + getBlockPoolId().hashCode(); } if (hasBlockId()) { hash = (37 * hash) + BLOCKID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getBlockId()); } if (getModesCount() > 0) { hash = (37 * hash) + MODES_FIELD_NUMBER; hash = (53 * hash) + modes_.hashCode(); } if (getStorageTypesCount() > 0) { hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER; hash = (53 * hash) + storageTypes_.hashCode(); } if (getStorageIdsCount() > 0) { hash = (37 * hash) + STORAGEIDS_FIELD_NUMBER; hash = (53 * hash) + getStorageIdsList().hashCode(); } if (hasHandshakeSecret()) { hash = (37 * hash) + HANDSHAKESECRET_FIELD_NUMBER; hash = (53 * hash) + getHandshakeSecret().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException 
{ return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Secret information for the BlockKeyProto. This is not sent on the wire as
     * such but is used to pack a byte array which is then encrypted and put in
     * BlockKeyProto.bytes
     * When adding further fields, make sure they are optional as they would
     * otherwise not be backwards compatible.
     * Note: As part of the migration from WritableUtils based tokens (aka "legacy")
     * to Protocol Buffers, we use the first byte to determine the type. If the
     * first byte is <=0 then it is a legacy token. This means that when using
     * protobuf tokens, the first field sent must have a `field_number` less
     * than 16 to make sure that the first byte is positive. Otherwise it could be
     * parsed as a legacy token. See HDFS-11026 for more discussion.
     * 
* * Protobuf type {@code hadoop.hdfs.BlockTokenSecretProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockTokenSecretProto) org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); expiryDate_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); keyId_ = 0; bitField0_ = (bitField0_ & ~0x00000002); userId_ = ""; bitField0_ = (bitField0_ & ~0x00000004); blockPoolId_ = ""; bitField0_ = (bitField0_ & ~0x00000008); blockId_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); modes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000020); storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & 
~0x00000040); storageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000080); handshakeSecret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000100); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.expiryDate_ = expiryDate_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.keyId_ = keyId_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.userId_ = userId_; if (((from_bitField0_ & 0x00000008) != 0)) { to_bitField0_ |= 0x00000008; } result.blockPoolId_ = blockPoolId_; if (((from_bitField0_ & 0x00000010) != 0)) { result.blockId_ = blockId_; to_bitField0_ |= 0x00000010; } if (((bitField0_ & 0x00000020) != 0)) { modes_ = java.util.Collections.unmodifiableList(modes_); bitField0_ 
= (bitField0_ & ~0x00000020); } result.modes_ = modes_; if (((bitField0_ & 0x00000040) != 0)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); bitField0_ = (bitField0_ & ~0x00000040); } result.storageTypes_ = storageTypes_; if (((bitField0_ & 0x00000080) != 0)) { storageIds_ = storageIds_.getUnmodifiableView(); bitField0_ = (bitField0_ & ~0x00000080); } result.storageIds_ = storageIds_; if (((from_bitField0_ & 0x00000100) != 0)) { to_bitField0_ |= 0x00000020; } result.handshakeSecret_ = handshakeSecret_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.getDefaultInstance()) return this; if (other.hasExpiryDate()) { setExpiryDate(other.getExpiryDate()); } if (other.hasKeyId()) { setKeyId(other.getKeyId()); } if (other.hasUserId()) { bitField0_ |= 0x00000004; userId_ = other.userId_; onChanged(); } if (other.hasBlockPoolId()) { bitField0_ |= 0x00000008; blockPoolId_ = other.blockPoolId_; onChanged(); } if (other.hasBlockId()) { setBlockId(other.getBlockId()); } if (!other.modes_.isEmpty()) { if (modes_.isEmpty()) { modes_ = other.modes_; bitField0_ = (bitField0_ & ~0x00000020); } else { ensureModesIsMutable(); modes_.addAll(other.modes_); } onChanged(); } if (!other.storageTypes_.isEmpty()) { if (storageTypes_.isEmpty()) { storageTypes_ = other.storageTypes_; bitField0_ = (bitField0_ & ~0x00000040); } else { ensureStorageTypesIsMutable(); storageTypes_.addAll(other.storageTypes_); } onChanged(); } if (!other.storageIds_.isEmpty()) { if (storageIds_.isEmpty()) { storageIds_ = other.storageIds_; bitField0_ = (bitField0_ & ~0x00000080); } else { ensureStorageIdsIsMutable(); storageIds_.addAll(other.storageIds_); } onChanged(); } if (other.hasHandshakeSecret()) { setHandshakeSecret(other.getHandshakeSecret()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = 
(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long expiryDate_ ; /** * optional uint64 expiryDate = 1; */ public boolean hasExpiryDate() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 expiryDate = 1; */ public long getExpiryDate() { return expiryDate_; } /** * optional uint64 expiryDate = 1; */ public Builder setExpiryDate(long value) { bitField0_ |= 0x00000001; expiryDate_ = value; onChanged(); return this; } /** * optional uint64 expiryDate = 1; */ public Builder clearExpiryDate() { bitField0_ = (bitField0_ & ~0x00000001); expiryDate_ = 0L; onChanged(); return this; } private int keyId_ ; /** * optional uint32 keyId = 2; */ public boolean hasKeyId() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint32 keyId = 2; */ public int getKeyId() { return keyId_; } /** * optional uint32 keyId = 2; */ public Builder setKeyId(int value) { bitField0_ |= 0x00000002; keyId_ = value; onChanged(); return this; } /** * optional uint32 keyId = 2; */ public Builder clearKeyId() { bitField0_ = (bitField0_ & ~0x00000002); keyId_ = 0; onChanged(); return this; } private java.lang.Object userId_ = ""; /** * optional string userId = 3; */ public boolean hasUserId() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string userId = 3; */ public java.lang.String getUserId() { java.lang.Object ref = userId_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { userId_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string userId = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getUserIdBytes() { java.lang.Object ref = userId_; if (ref instanceof 
String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); userId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string userId = 3; */ public Builder setUserId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; userId_ = value; onChanged(); return this; } /** * optional string userId = 3; */ public Builder clearUserId() { bitField0_ = (bitField0_ & ~0x00000004); userId_ = getDefaultInstance().getUserId(); onChanged(); return this; } /** * optional string userId = 3; */ public Builder setUserIdBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; userId_ = value; onChanged(); return this; } private java.lang.Object blockPoolId_ = ""; /** * optional string blockPoolId = 4; */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000008) != 0); } /** * optional string blockPoolId = 4; */ public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string blockPoolId = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string blockPoolId = 4; */ public Builder setBlockPoolId( java.lang.String 
value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; blockPoolId_ = value; onChanged(); return this; } /** * optional string blockPoolId = 4; */ public Builder clearBlockPoolId() { bitField0_ = (bitField0_ & ~0x00000008); blockPoolId_ = getDefaultInstance().getBlockPoolId(); onChanged(); return this; } /** * optional string blockPoolId = 4; */ public Builder setBlockPoolIdBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; blockPoolId_ = value; onChanged(); return this; } private long blockId_ ; /** * optional uint64 blockId = 5; */ public boolean hasBlockId() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 blockId = 5; */ public long getBlockId() { return blockId_; } /** * optional uint64 blockId = 5; */ public Builder setBlockId(long value) { bitField0_ |= 0x00000010; blockId_ = value; onChanged(); return this; } /** * optional uint64 blockId = 5; */ public Builder clearBlockId() { bitField0_ = (bitField0_ & ~0x00000010); blockId_ = 0L; onChanged(); return this; } private java.util.List modes_ = java.util.Collections.emptyList(); private void ensureModesIsMutable() { if (!((bitField0_ & 0x00000020) != 0)) { modes_ = new java.util.ArrayList(modes_); bitField0_ |= 0x00000020; } } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; */ public java.util.List getModesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto>(modes_, modes_converter_); } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; */ public int getModesCount() { return modes_.size(); } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto getModes(int index) { return modes_converter_.convert(modes_.get(index)); } /** * repeated .hadoop.hdfs.AccessModeProto 
modes = 6; */ public Builder setModes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto value) { if (value == null) { throw new NullPointerException(); } ensureModesIsMutable(); modes_.set(index, value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; */ public Builder addModes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto value) { if (value == null) { throw new NullPointerException(); } ensureModesIsMutable(); modes_.add(value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; */ public Builder addAllModes( java.lang.Iterable values) { ensureModesIsMutable(); for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto value : values) { modes_.add(value.getNumber()); } onChanged(); return this; } /** * repeated .hadoop.hdfs.AccessModeProto modes = 6; */ public Builder clearModes() { modes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000020); onChanged(); return this; } private java.util.List storageTypes_ = java.util.Collections.emptyList(); private void ensureStorageTypesIsMutable() { if (!((bitField0_ & 0x00000040) != 0)) { storageTypes_ = new java.util.ArrayList(storageTypes_); bitField0_ |= 0x00000040; } } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public java.util.List getStorageTypesList() { return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter< java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_converter_.convert(storageTypes_.get(index)); } /** * repeated 
.hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public Builder setStorageTypes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.set(index, value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.add(value.getNumber()); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public Builder addAllStorageTypes( java.lang.Iterable values) { ensureStorageTypesIsMutable(); for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) { storageTypes_.add(value.getNumber()); } onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public Builder clearStorageTypes() { storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000040); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.LazyStringList storageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; private void ensureStorageIdsIsMutable() { if (!((bitField0_ & 0x00000080) != 0)) { storageIds_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(storageIds_); bitField0_ |= 0x00000080; } } /** * repeated string storageIds = 8; */ public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList getStorageIdsList() { return storageIds_.getUnmodifiableView(); } /** * repeated string storageIds = 8; */ public int getStorageIdsCount() { return storageIds_.size(); } /** * repeated string storageIds = 8; */ public java.lang.String getStorageIds(int index) { return storageIds_.get(index); } /** * repeated string storageIds = 8; */ public 
org.apache.hadoop.thirdparty.protobuf.ByteString getStorageIdsBytes(int index) { return storageIds_.getByteString(index); } /** * repeated string storageIds = 8; */ public Builder setStorageIds( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageIdsIsMutable(); storageIds_.set(index, value); onChanged(); return this; } /** * repeated string storageIds = 8; */ public Builder addStorageIds( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageIdsIsMutable(); storageIds_.add(value); onChanged(); return this; } /** * repeated string storageIds = 8; */ public Builder addAllStorageIds( java.lang.Iterable values) { ensureStorageIdsIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, storageIds_); onChanged(); return this; } /** * repeated string storageIds = 8; */ public Builder clearStorageIds() { storageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000080); onChanged(); return this; } /** * repeated string storageIds = 8; */ public Builder addStorageIdsBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureStorageIdsIsMutable(); storageIds_.add(value); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString handshakeSecret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes handshakeSecret = 9; */ public boolean hasHandshakeSecret() { return ((bitField0_ & 0x00000100) != 0); } /** * optional bytes handshakeSecret = 9; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getHandshakeSecret() { return handshakeSecret_; } /** * optional bytes handshakeSecret = 9; */ public Builder setHandshakeSecret(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000100; 
handshakeSecret_ = value; onChanged(); return this; } /** * optional bytes handshakeSecret = 9; */ public Builder clearHandshakeSecret() { bitField0_ = (bitField0_ & ~0x00000100); handshakeSecret_ = getDefaultInstance().getHandshakeSecret(); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockTokenSecretProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockTokenSecretProto) private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto(); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public BlockTokenSecretProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new BlockTokenSecretProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto getDefaultInstanceForType() { return 
DEFAULT_INSTANCE; } } private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeIDProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor 
internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageReportProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ContentSummaryProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_QuotaUsageProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor; private static final 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageTypesProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_LocatedBlockProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_BatchedListingKeyProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable 
internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CipherOptionProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable; private static final 
org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ECSchemaProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor 
internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DirectoryListingProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_RemoteExceptionProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_BatchedDirectoryListingProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor; private static final 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor; private static final 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BlockProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageUuidsProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_BlockTokenSecretProto_fieldAccessorTable; public static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } private static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { "\n\nhdfs.proto\022\013hadoop.hdfs\032\016Security.prot" + 
"o\032\tacl.proto\"c\n\022ExtendedBlockProto\022\016\n\006po" + "olId\030\001 \002(\t\022\017\n\007blockId\030\002 \002(\004\022\027\n\017generatio" + "nStamp\030\003 \002(\004\022\023\n\010numBytes\030\004 \001(\004:\0010\"[\n\034Pro" + "videdStorageLocationProto\022\014\n\004path\030\001 \002(\t\022" + "\016\n\006offset\030\002 \002(\003\022\016\n\006length\030\003 \002(\003\022\r\n\005nonce" + "\030\004 \002(\014\"\231\001\n\017DatanodeIDProto\022\016\n\006ipAddr\030\001 \002" + "(\t\022\020\n\010hostName\030\002 \002(\t\022\024\n\014datanodeUuid\030\003 \002" + "(\t\022\020\n\010xferPort\030\004 \002(\r\022\020\n\010infoPort\030\005 \002(\r\022\017" + "\n\007ipcPort\030\006 \002(\r\022\031\n\016infoSecurePort\030\007 \001(\r:" + "\0010\"X\n\026DatanodeLocalInfoProto\022\027\n\017software" + "Version\030\001 \002(\t\022\025\n\rconfigVersion\030\002 \002(\t\022\016\n\006" + "uptime\030\003 \002(\004\"\315\001\n\027DatanodeVolumeInfoProto" + "\022\014\n\004path\030\001 \002(\t\0222\n\013storageType\030\002 \002(\0162\035.ha" + "doop.hdfs.StorageTypeProto\022\021\n\tusedSpace\030" + "\003 \002(\004\022\021\n\tfreeSpace\030\004 \002(\004\022\025\n\rreservedSpac" + "e\030\005 \002(\004\022 \n\030reservedSpaceForReplicas\030\006 \002(" + "\004\022\021\n\tnumBlocks\030\007 \002(\004\"G\n\022DatanodeInfosPro" + "to\0221\n\tdatanodes\030\001 \003(\0132\036.hadoop.hdfs.Data" + "nodeInfoProto\"\356\004\n\021DatanodeInfoProto\022(\n\002i" + "d\030\001 \002(\0132\034.hadoop.hdfs.DatanodeIDProto\022\023\n" + "\010capacity\030\002 \001(\004:\0010\022\022\n\007dfsUsed\030\003 \001(\004:\0010\022\024" + "\n\tremaining\030\004 \001(\004:\0010\022\030\n\rblockPoolUsed\030\005 " + "\001(\004:\0010\022\025\n\nlastUpdate\030\006 \001(\004:\0010\022\027\n\014xceiver" + "Count\030\007 \001(\r:\0010\022\020\n\010location\030\010 \001(\t\022\022\n\nnonD" + "fsUsed\030\t \001(\004\022E\n\nadminState\030\n \001(\0162).hadoo" + "p.hdfs.DatanodeInfoProto.AdminState:\006NOR" + 
"MAL\022\030\n\rcacheCapacity\030\013 \001(\004:\0010\022\024\n\tcacheUs" + "ed\030\014 \001(\004:\0010\022\036\n\023lastUpdateMonotonic\030\r \001(\004" + ":\0010\022\025\n\rupgradeDomain\030\016 \001(\t\022\036\n\023lastBlockR" + "eportTime\030\017 \001(\004:\0010\022#\n\030lastBlockReportMon" + "otonic\030\020 \001(\004:\0010\022\024\n\tnumBlocks\030\021 \001(\r:\0010\"w\n" + "\nAdminState\022\n\n\006NORMAL\020\000\022\033\n\027DECOMMISSION_" + "INPROGRESS\020\001\022\022\n\016DECOMMISSIONED\020\002\022\030\n\024ENTE" + "RING_MAINTENANCE\020\003\022\022\n\016IN_MAINTENANCE\020\004\"\336" + "\001\n\024DatanodeStorageProto\022\023\n\013storageUuid\030\001" + " \002(\t\022E\n\005state\030\002 \001(\0162..hadoop.hdfs.Datano" + "deStorageProto.StorageState:\006NORMAL\0228\n\013s" + "torageType\030\003 \001(\0162\035.hadoop.hdfs.StorageTy" + "peProto:\004DISK\"0\n\014StorageState\022\n\n\006NORMAL\020" + "\000\022\024\n\020READ_ONLY_SHARED\020\001\"\345\001\n\022StorageRepor" + "tProto\022\027\n\013storageUuid\030\001 \002(\tB\002\030\001\022\025\n\006faile" + "d\030\002 \001(\010:\005false\022\023\n\010capacity\030\003 \001(\004:\0010\022\022\n\007d" + "fsUsed\030\004 \001(\004:\0010\022\024\n\tremaining\030\005 \001(\004:\0010\022\030\n" + "\rblockPoolUsed\030\006 \001(\004:\0010\0222\n\007storage\030\007 \001(\013" + "2!.hadoop.hdfs.DatanodeStorageProto\022\022\n\nn" + "onDfsUsed\030\010 \001(\004\"\332\002\n\023ContentSummaryProto\022" + "\016\n\006length\030\001 \002(\004\022\021\n\tfileCount\030\002 \002(\004\022\026\n\016di" + "rectoryCount\030\003 \002(\004\022\r\n\005quota\030\004 \002(\004\022\025\n\rspa" + "ceConsumed\030\005 \002(\004\022\022\n\nspaceQuota\030\006 \002(\004\022?\n\016" + "typeQuotaInfos\030\007 \001(\0132\'.hadoop.hdfs.Stora" + "geTypeQuotaInfosProto\022\026\n\016snapshotLength\030" + "\010 \001(\004\022\031\n\021snapshotFileCount\030\t \001(\004\022\036\n\026snap" + "shotDirectoryCount\030\n \001(\004\022\035\n\025snapshotSpac" + 
"eConsumed\030\013 \001(\004\022\033\n\023erasureCodingPolicy\030\014" + " \001(\t\"\253\001\n\017QuotaUsageProto\022\035\n\025fileAndDirec" + "toryCount\030\001 \002(\004\022\r\n\005quota\030\002 \002(\004\022\025\n\rspaceC" + "onsumed\030\003 \002(\004\022\022\n\nspaceQuota\030\004 \002(\004\022?\n\016typ" + "eQuotaInfos\030\005 \001(\0132\'.hadoop.hdfs.StorageT" + "ypeQuotaInfosProto\"[\n\032StorageTypeQuotaIn" + "fosProto\022=\n\rtypeQuotaInfo\030\001 \003(\0132&.hadoop" + ".hdfs.StorageTypeQuotaInfoProto\"o\n\031Stora" + "geTypeQuotaInfoProto\0221\n\004type\030\001 \001(\0162\035.had" + "oop.hdfs.StorageTypeProto:\004DISK\022\r\n\005quota" + "\030\002 \002(\004\022\020\n\010consumed\030\003 \002(\004\"7\n\026CorruptFileB" + "locksProto\022\r\n\005files\030\001 \003(\t\022\016\n\006cookie\030\002 \002(" + "\t\"H\n\021StorageTypesProto\0223\n\014storageTypes\030\001" + " \003(\0162\035.hadoop.hdfs.StorageTypeProto\"\364\001\n\027" + "BlockStoragePolicyProto\022\020\n\010policyId\030\001 \002(" + "\r\022\014\n\004name\030\002 \002(\t\0226\n\016creationPolicy\030\003 \002(\0132" + "\036.hadoop.hdfs.StorageTypesProto\022>\n\026creat" + "ionFallbackPolicy\030\004 \001(\0132\036.hadoop.hdfs.St" + "orageTypesProto\022A\n\031replicationFallbackPo" + "licy\030\005 \001(\0132\036.hadoop.hdfs.StorageTypesPro" + "to\"\342\002\n\021LocatedBlockProto\022*\n\001b\030\001 \002(\0132\037.ha" + "doop.hdfs.ExtendedBlockProto\022\016\n\006offset\030\002" + " \002(\004\022,\n\004locs\030\003 \003(\0132\036.hadoop.hdfs.Datanod" + "eInfoProto\022\017\n\007corrupt\030\004 \002(\010\022-\n\nblockToke" + "n\030\005 \002(\0132\031.hadoop.common.TokenProto\022\024\n\010is" + "Cached\030\006 \003(\010B\002\020\001\0223\n\014storageTypes\030\007 \003(\0162\035" + ".hadoop.hdfs.StorageTypeProto\022\022\n\nstorage" + "IDs\030\010 \003(\t\022\024\n\014blockIndices\030\t \001(\014\022.\n\013block" + "Tokens\030\n \003(\0132\031.hadoop.common.TokenProto\"" + 
"Q\n\026BatchedListingKeyProto\022\020\n\010checksum\030\001 " + "\002(\014\022\021\n\tpathIndex\030\002 \002(\r\022\022\n\nstartAfter\030\003 \002" + "(\014\"\223\001\n\026DataEncryptionKeyProto\022\r\n\005keyId\030\001" + " \002(\r\022\023\n\013blockPoolId\030\002 \002(\t\022\r\n\005nonce\030\003 \002(\014" + "\022\025\n\rencryptionKey\030\004 \002(\014\022\022\n\nexpiryDate\030\005 " + "\002(\004\022\033\n\023encryptionAlgorithm\030\006 \001(\t\"\323\001\n\027Fil" + "eEncryptionInfoProto\022,\n\005suite\030\001 \002(\0162\035.ha" + "doop.hdfs.CipherSuiteProto\022F\n\025cryptoProt" + "ocolVersion\030\002 \002(\0162\'.hadoop.hdfs.CryptoPr" + "otocolVersionProto\022\013\n\003key\030\003 \002(\014\022\n\n\002iv\030\004 " + "\002(\014\022\017\n\007keyName\030\005 \002(\t\022\030\n\020ezKeyVersionName" + "\030\006 \002(\t\"O\n\032PerFileEncryptionInfoProto\022\013\n\003" + "key\030\001 \002(\014\022\n\n\002iv\030\002 \002(\014\022\030\n\020ezKeyVersionNam" + "e\030\003 \002(\t\"\337\001\n\027ZoneEncryptionInfoProto\022,\n\005s" + "uite\030\001 \002(\0162\035.hadoop.hdfs.CipherSuiteProt" + "o\022F\n\025cryptoProtocolVersion\030\002 \002(\0162\'.hadoo" + "p.hdfs.CryptoProtocolVersionProto\022\017\n\007key" + "Name\030\003 \002(\t\022=\n\021reencryptionProto\030\004 \001(\0132\"." 
+ "hadoop.hdfs.ReencryptionInfoProto\"\262\001\n\025Re" + "encryptionInfoProto\022\030\n\020ezKeyVersionName\030" + "\001 \002(\t\022\026\n\016submissionTime\030\002 \002(\004\022\020\n\010cancele" + "d\030\003 \002(\010\022\026\n\016numReencrypted\030\004 \002(\003\022\023\n\013numFa" + "ilures\030\005 \002(\003\022\026\n\016completionTime\030\006 \001(\004\022\020\n\010" + "lastFile\030\007 \001(\t\"}\n\021CipherOptionProto\022,\n\005s" + "uite\030\001 \002(\0162\035.hadoop.hdfs.CipherSuiteProt" + "o\022\r\n\005inKey\030\002 \001(\014\022\014\n\004inIv\030\003 \001(\014\022\016\n\006outKey" + "\030\004 \001(\014\022\r\n\005outIv\030\005 \001(\014\"\276\002\n\022LocatedBlocksP" + "roto\022\022\n\nfileLength\030\001 \002(\004\022.\n\006blocks\030\002 \003(\013" + "2\036.hadoop.hdfs.LocatedBlockProto\022\031\n\021unde" + "rConstruction\030\003 \002(\010\0221\n\tlastBlock\030\004 \001(\0132\036" + ".hadoop.hdfs.LocatedBlockProto\022\033\n\023isLast" + "BlockComplete\030\005 \002(\010\022@\n\022fileEncryptionInf" + "o\030\006 \001(\0132$.hadoop.hdfs.FileEncryptionInfo" + "Proto\0227\n\010ecPolicy\030\007 \001(\0132%.hadoop.hdfs.Er" + "asureCodingPolicyProto\"6\n\030ECSchemaOption" + "EntryProto\022\013\n\003key\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"\202" + "\001\n\rECSchemaProto\022\021\n\tcodecName\030\001 \002(\t\022\021\n\td" + "ataUnits\030\002 \002(\r\022\023\n\013parityUnits\030\003 \002(\r\0226\n\007o" + "ptions\030\004 \003(\0132%.hadoop.hdfs.ECSchemaOptio" + "nEntryProto\"\261\001\n\030ErasureCodingPolicyProto" + "\022\014\n\004name\030\001 \001(\t\022*\n\006schema\030\002 \001(\0132\032.hadoop." 
+ "hdfs.ECSchemaProto\022\020\n\010cellSize\030\003 \001(\r\022\n\n\002" + "id\030\004 \002(\r\022=\n\005state\030\005 \001(\0162%.hadoop.hdfs.Er" + "asureCodingPolicyState:\007ENABLED\"\177\n#AddEr" + "asureCodingPolicyResponseProto\0225\n\006policy" + "\030\001 \002(\0132%.hadoop.hdfs.ErasureCodingPolicy" + "Proto\022\017\n\007succeed\030\002 \002(\010\022\020\n\010errorMsg\030\003 \001(\t" + "\"K\n\035ECTopologyVerifierResultProto\022\025\n\rres" + "ultMessage\030\001 \002(\t\022\023\n\013isSupported\030\002 \002(\010\"C\n" + "\023HdfsPathHandleProto\022\017\n\007inodeId\030\001 \001(\004\022\r\n" + "\005mtime\030\002 \001(\004\022\014\n\004path\030\003 \001(\t\"\272\005\n\023HdfsFileS" + "tatusProto\022;\n\010fileType\030\001 \002(\0162).hadoop.hd" + "fs.HdfsFileStatusProto.FileType\022\014\n\004path\030" + "\002 \002(\014\022\016\n\006length\030\003 \002(\004\0222\n\npermission\030\004 \002(" + "\0132\036.hadoop.hdfs.FsPermissionProto\022\r\n\005own" + "er\030\005 \002(\t\022\r\n\005group\030\006 \002(\t\022\031\n\021modification_" + "time\030\007 \002(\004\022\023\n\013access_time\030\010 \002(\004\022\017\n\007symli" + "nk\030\t \001(\014\022\034\n\021block_replication\030\n \001(\r:\0010\022\024" + "\n\tblocksize\030\013 \001(\004:\0010\0222\n\tlocations\030\014 \001(\0132" + "\037.hadoop.hdfs.LocatedBlocksProto\022\021\n\006file" + "Id\030\r \001(\004:\0010\022\027\n\013childrenNum\030\016 \001(\005:\002-1\022@\n\022" + "fileEncryptionInfo\030\017 \001(\0132$.hadoop.hdfs.F" + "ileEncryptionInfoProto\022\030\n\rstoragePolicy\030" + "\020 \001(\r:\0010\0227\n\010ecPolicy\030\021 \001(\0132%.hadoop.hdfs" + ".ErasureCodingPolicyProto\022\020\n\005flags\030\022 \001(\r" + ":\0010\"3\n\010FileType\022\n\n\006IS_DIR\020\001\022\013\n\007IS_FILE\020\002" + "\022\016\n\nIS_SYMLINK\020\003\"E\n\005Flags\022\013\n\007HAS_ACL\020\001\022\r" + "\n\tHAS_CRYPT\020\002\022\n\n\006HAS_EC\020\004\022\024\n\020SNAPSHOT_EN" + 
"ABLED\020\010\"y\n\031BlockChecksumOptionsProto\022F\n\021" + "blockChecksumType\030\001 \001(\0162#.hadoop.hdfs.Bl" + "ockChecksumTypeProto:\006MD5CRC\022\024\n\014stripeLe" + "ngth\030\002 \001(\004\"\273\002\n\025FsServerDefaultsProto\022\021\n\t" + "blockSize\030\001 \002(\004\022\030\n\020bytesPerChecksum\030\002 \002(" + "\r\022\027\n\017writePacketSize\030\003 \002(\r\022\023\n\013replicatio" + "n\030\004 \002(\r\022\026\n\016fileBufferSize\030\005 \002(\r\022\"\n\023encry" + "ptDataTransfer\030\006 \001(\010:\005false\022\030\n\rtrashInte" + "rval\030\007 \001(\004:\0010\022D\n\014checksumType\030\010 \001(\0162\036.ha" + "doop.hdfs.ChecksumTypeProto:\016CHECKSUM_CR" + "C32\022\026\n\016keyProviderUri\030\t \001(\t\022\023\n\010policyId\030" + "\n \001(\r:\0010\"k\n\025DirectoryListingProto\0228\n\016par" + "tialListing\030\001 \003(\0132 .hadoop.hdfs.HdfsFile" + "StatusProto\022\030\n\020remainingEntries\030\002 \002(\r\":\n" + "\024RemoteExceptionProto\022\021\n\tclassName\030\001 \002(\t" + "\022\017\n\007message\030\002 \001(\t\"\241\001\n\034BatchedDirectoryLi" + "stingProto\0228\n\016partialListing\030\001 \003(\0132 .had" + "oop.hdfs.HdfsFileStatusProto\022\021\n\tparentId" + "x\030\002 \002(\r\0224\n\texception\030\003 \001(\0132!.hadoop.hdfs" + ".RemoteExceptionProto\"\242\001\n!SnapshottableD" + "irectoryStatusProto\0223\n\tdirStatus\030\001 \002(\0132 " + ".hadoop.hdfs.HdfsFileStatusProto\022\026\n\016snap" + "shot_quota\030\002 \002(\r\022\027\n\017snapshot_number\030\003 \002(" + "\r\022\027\n\017parent_fullpath\030\004 \002(\014\"u\n\"Snapshotta" + "bleDirectoryListingProto\022O\n\027snapshottabl" + "eDirListing\030\001 \003(\0132..hadoop.hdfs.Snapshot" + "tableDirectoryStatusProto\"_\n\034SnapshotDif" + "fReportEntryProto\022\020\n\010fullpath\030\001 \002(\014\022\031\n\021m" + "odificationLabel\030\002 \002(\t\022\022\n\ntargetPath\030\003 \001" + "(\014\"\237\001\n\027SnapshotDiffReportProto\022\024\n\014snapsh" + "otRoot\030\001 
\002(\t\022\024\n\014fromSnapshot\030\002 \002(\t\022\022\n\nto" + "Snapshot\030\003 \002(\t\022D\n\021diffReportEntries\030\004 \003(" + "\0132).hadoop.hdfs.SnapshotDiffReportEntryP" + "roto\"\177\n#SnapshotDiffReportListingEntryPr" + "oto\022\020\n\010fullpath\030\001 \002(\014\022\r\n\005dirId\030\002 \002(\004\022\023\n\013" + "isReference\030\003 \002(\010\022\022\n\ntargetPath\030\004 \001(\014\022\016\n" + "\006fileId\030\005 \001(\004\"E\n\035SnapshotDiffReportCurso" + "rProto\022\021\n\tstartPath\030\001 \002(\014\022\021\n\005index\030\002 \002(\005" + ":\002-1\"\322\002\n\036SnapshotDiffReportListingProto\022" + "I\n\017modifiedEntries\030\001 \003(\01320.hadoop.hdfs.S" + "napshotDiffReportListingEntryProto\022H\n\016cr" + "eatedEntries\030\002 \003(\01320.hadoop.hdfs.Snapsho" + "tDiffReportListingEntryProto\022H\n\016deletedE" + "ntries\030\003 \003(\01320.hadoop.hdfs.SnapshotDiffR" + "eportListingEntryProto\022\025\n\risFromEarlier\030" + "\004 \002(\010\022:\n\006cursor\030\005 \001(\0132*.hadoop.hdfs.Snap" + "shotDiffReportCursorProto\"D\n\nBlockProto\022" + "\017\n\007blockId\030\001 \002(\004\022\020\n\010genStamp\030\002 \002(\004\022\023\n\010nu" + "mBytes\030\003 \001(\004:\0010\"\245\001\n\021SnapshotInfoProto\022\024\n" + "\014snapshotName\030\001 \002(\t\022\024\n\014snapshotRoot\030\002 \002(" + "\t\0222\n\npermission\030\003 \002(\0132\036.hadoop.hdfs.FsPe" + "rmissionProto\022\r\n\005owner\030\004 \002(\t\022\r\n\005group\030\005 " + "\002(\t\022\022\n\ncreateTime\030\006 \002(\t\"J\n\031RollingUpgrad" + "eStatusProto\022\023\n\013blockPoolId\030\001 \002(\t\022\030\n\tfin" + "alized\030\002 \001(\010:\005false\")\n\021StorageUuidsProto" + "\022\024\n\014storageUuids\030\001 \003(\t\"\377\001\n\025BlockTokenSec" + "retProto\022\022\n\nexpiryDate\030\001 \001(\004\022\r\n\005keyId\030\002 " + "\001(\r\022\016\n\006userId\030\003 \001(\t\022\023\n\013blockPoolId\030\004 \001(\t" + "\022\017\n\007blockId\030\005 
\001(\004\022+\n\005modes\030\006 \003(\0162\034.hadoo" + "p.hdfs.AccessModeProto\0223\n\014storageTypes\030\007" + " \003(\0162\035.hadoop.hdfs.StorageTypeProto\022\022\n\ns" + "torageIds\030\010 \003(\t\022\027\n\017handshakeSecret\030\t \001(\014" + "*N\n\020StorageTypeProto\022\010\n\004DISK\020\001\022\007\n\003SSD\020\002\022" + "\013\n\007ARCHIVE\020\003\022\014\n\010RAM_DISK\020\004\022\014\n\010PROVIDED\020\005" + "*-\n\016BlockTypeProto\022\016\n\nCONTIGUOUS\020\000\022\013\n\007ST" + "RIPED\020\001*6\n\020CipherSuiteProto\022\013\n\007UNKNOWN\020\001" + "\022\025\n\021AES_CTR_NOPADDING\020\002*P\n\032CryptoProtoco" + "lVersionProto\022\034\n\030UNKNOWN_PROTOCOL_VERSIO" + "N\020\001\022\024\n\020ENCRYPTION_ZONES\020\002*B\n\030ErasureCodi" + "ngPolicyState\022\014\n\010DISABLED\020\001\022\013\n\007ENABLED\020\002" + "\022\013\n\007REMOVED\020\003*O\n\021ChecksumTypeProto\022\021\n\rCH" + "ECKSUM_NULL\020\000\022\022\n\016CHECKSUM_CRC32\020\001\022\023\n\017CHE" + "CKSUM_CRC32C\020\002*7\n\026BlockChecksumTypeProto" + "\022\n\n\006MD5CRC\020\001\022\021\n\rCOMPOSITE_CRC\020\002*=\n\017Acces" + "sModeProto\022\010\n\004READ\020\001\022\t\n\005WRITE\020\002\022\010\n\004COPY\020" + "\003\022\013\n\007REPLACE\020\004B6\n%org.apache.hadoop.hdfs" + ".protocol.protoB\nHdfsProtos\240\001\001" }; org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { public org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry assignDescriptors( org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor root) { descriptor = root; return null; } }; org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(), org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor(), }, assigner); internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor, new java.lang.String[] { "PoolId", "BlockId", "GenerationStamp", "NumBytes", }); internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor, new java.lang.String[] { "Path", "Offset", "Length", "Nonce", }); internal_static_hadoop_hdfs_DatanodeIDProto_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeIDProto_descriptor, new java.lang.String[] { "IpAddr", "HostName", "DatanodeUuid", "XferPort", "InfoPort", "IpcPort", "InfoSecurePort", }); internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor = getDescriptor().getMessageTypes().get(3); internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable = new 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor, new java.lang.String[] { "SoftwareVersion", "ConfigVersion", "Uptime", }); internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor, new java.lang.String[] { "Path", "StorageType", "UsedSpace", "FreeSpace", "ReservedSpace", "ReservedSpaceForReplicas", "NumBlocks", }); internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor = getDescriptor().getMessageTypes().get(5); internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor, new java.lang.String[] { "Datanodes", }); internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor = getDescriptor().getMessageTypes().get(6); internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor, new java.lang.String[] { "Id", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "LastUpdate", "XceiverCount", "Location", "NonDfsUsed", "AdminState", "CacheCapacity", "CacheUsed", "LastUpdateMonotonic", "UpgradeDomain", "LastBlockReportTime", "LastBlockReportMonotonic", "NumBlocks", }); internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor = getDescriptor().getMessageTypes().get(7); internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor, new java.lang.String[] { "StorageUuid", "State", 
"StorageType", }); internal_static_hadoop_hdfs_StorageReportProto_descriptor = getDescriptor().getMessageTypes().get(8); internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_StorageReportProto_descriptor, new java.lang.String[] { "StorageUuid", "Failed", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "Storage", "NonDfsUsed", }); internal_static_hadoop_hdfs_ContentSummaryProto_descriptor = getDescriptor().getMessageTypes().get(9); internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ContentSummaryProto_descriptor, new java.lang.String[] { "Length", "FileCount", "DirectoryCount", "Quota", "SpaceConsumed", "SpaceQuota", "TypeQuotaInfos", "SnapshotLength", "SnapshotFileCount", "SnapshotDirectoryCount", "SnapshotSpaceConsumed", "ErasureCodingPolicy", }); internal_static_hadoop_hdfs_QuotaUsageProto_descriptor = getDescriptor().getMessageTypes().get(10); internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_QuotaUsageProto_descriptor, new java.lang.String[] { "FileAndDirectoryCount", "Quota", "SpaceConsumed", "SpaceQuota", "TypeQuotaInfos", }); internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor = getDescriptor().getMessageTypes().get(11); internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor, new java.lang.String[] { "TypeQuotaInfo", }); internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor = getDescriptor().getMessageTypes().get(12); internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable = new 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor, new java.lang.String[] { "Type", "Quota", "Consumed", }); internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor = getDescriptor().getMessageTypes().get(13); internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor, new java.lang.String[] { "Files", "Cookie", }); internal_static_hadoop_hdfs_StorageTypesProto_descriptor = getDescriptor().getMessageTypes().get(14); internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_StorageTypesProto_descriptor, new java.lang.String[] { "StorageTypes", }); internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor = getDescriptor().getMessageTypes().get(15); internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor, new java.lang.String[] { "PolicyId", "Name", "CreationPolicy", "CreationFallbackPolicy", "ReplicationFallbackPolicy", }); internal_static_hadoop_hdfs_LocatedBlockProto_descriptor = getDescriptor().getMessageTypes().get(16); internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_LocatedBlockProto_descriptor, new java.lang.String[] { "B", "Offset", "Locs", "Corrupt", "BlockToken", "IsCached", "StorageTypes", "StorageIDs", "BlockIndices", "BlockTokens", }); internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor = getDescriptor().getMessageTypes().get(17); 
internal_static_hadoop_hdfs_BatchedListingKeyProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor, new java.lang.String[] { "Checksum", "PathIndex", "StartAfter", }); internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor = getDescriptor().getMessageTypes().get(18); internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor, new java.lang.String[] { "KeyId", "BlockPoolId", "Nonce", "EncryptionKey", "ExpiryDate", "EncryptionAlgorithm", }); internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor = getDescriptor().getMessageTypes().get(19); internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor, new java.lang.String[] { "Suite", "CryptoProtocolVersion", "Key", "Iv", "KeyName", "EzKeyVersionName", }); internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor = getDescriptor().getMessageTypes().get(20); internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor, new java.lang.String[] { "Key", "Iv", "EzKeyVersionName", }); internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor = getDescriptor().getMessageTypes().get(21); internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor, new java.lang.String[] { "Suite", "CryptoProtocolVersion", "KeyName", "ReencryptionProto", }); 
internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor = getDescriptor().getMessageTypes().get(22); internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor, new java.lang.String[] { "EzKeyVersionName", "SubmissionTime", "Canceled", "NumReencrypted", "NumFailures", "CompletionTime", "LastFile", }); internal_static_hadoop_hdfs_CipherOptionProto_descriptor = getDescriptor().getMessageTypes().get(23); internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_CipherOptionProto_descriptor, new java.lang.String[] { "Suite", "InKey", "InIv", "OutKey", "OutIv", }); internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor = getDescriptor().getMessageTypes().get(24); internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor, new java.lang.String[] { "FileLength", "Blocks", "UnderConstruction", "LastBlock", "IsLastBlockComplete", "FileEncryptionInfo", "EcPolicy", }); internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor = getDescriptor().getMessageTypes().get(25); internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor, new java.lang.String[] { "Key", "Value", }); internal_static_hadoop_hdfs_ECSchemaProto_descriptor = getDescriptor().getMessageTypes().get(26); internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ECSchemaProto_descriptor, new java.lang.String[] { 
"CodecName", "DataUnits", "ParityUnits", "Options", }); internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor = getDescriptor().getMessageTypes().get(27); internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor, new java.lang.String[] { "Name", "Schema", "CellSize", "Id", "State", }); internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor = getDescriptor().getMessageTypes().get(28); internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor, new java.lang.String[] { "Policy", "Succeed", "ErrorMsg", }); internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor = getDescriptor().getMessageTypes().get(29); internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor, new java.lang.String[] { "ResultMessage", "IsSupported", }); internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor = getDescriptor().getMessageTypes().get(30); internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor, new java.lang.String[] { "InodeId", "Mtime", "Path", }); internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor = getDescriptor().getMessageTypes().get(31); internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor, new java.lang.String[] { 
"FileType", "Path", "Length", "Permission", "Owner", "Group", "ModificationTime", "AccessTime", "Symlink", "BlockReplication", "Blocksize", "Locations", "FileId", "ChildrenNum", "FileEncryptionInfo", "StoragePolicy", "EcPolicy", "Flags", }); internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor = getDescriptor().getMessageTypes().get(32); internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor, new java.lang.String[] { "BlockChecksumType", "StripeLength", }); internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor = getDescriptor().getMessageTypes().get(33); internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor, new java.lang.String[] { "BlockSize", "BytesPerChecksum", "WritePacketSize", "Replication", "FileBufferSize", "EncryptDataTransfer", "TrashInterval", "ChecksumType", "KeyProviderUri", "PolicyId", }); internal_static_hadoop_hdfs_DirectoryListingProto_descriptor = getDescriptor().getMessageTypes().get(34); internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_DirectoryListingProto_descriptor, new java.lang.String[] { "PartialListing", "RemainingEntries", }); internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor = getDescriptor().getMessageTypes().get(35); internal_static_hadoop_hdfs_RemoteExceptionProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor, new java.lang.String[] { "ClassName", "Message", }); internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor = 
getDescriptor().getMessageTypes().get(36); internal_static_hadoop_hdfs_BatchedDirectoryListingProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor, new java.lang.String[] { "PartialListing", "ParentIdx", "Exception", }); internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor = getDescriptor().getMessageTypes().get(37); internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor, new java.lang.String[] { "DirStatus", "SnapshotQuota", "SnapshotNumber", "ParentFullpath", }); internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor = getDescriptor().getMessageTypes().get(38); internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor, new java.lang.String[] { "SnapshottableDirListing", }); internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor = getDescriptor().getMessageTypes().get(39); internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor, new java.lang.String[] { "Fullpath", "ModificationLabel", "TargetPath", }); internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor = getDescriptor().getMessageTypes().get(40); internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor, new java.lang.String[] { "SnapshotRoot", 
"FromSnapshot", "ToSnapshot", "DiffReportEntries", }); internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor = getDescriptor().getMessageTypes().get(41); internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor, new java.lang.String[] { "Fullpath", "DirId", "IsReference", "TargetPath", "FileId", }); internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor = getDescriptor().getMessageTypes().get(42); internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor, new java.lang.String[] { "StartPath", "Index", }); internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor = getDescriptor().getMessageTypes().get(43); internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor, new java.lang.String[] { "ModifiedEntries", "CreatedEntries", "DeletedEntries", "IsFromEarlier", "Cursor", }); internal_static_hadoop_hdfs_BlockProto_descriptor = getDescriptor().getMessageTypes().get(44); internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_BlockProto_descriptor, new java.lang.String[] { "BlockId", "GenStamp", "NumBytes", }); internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor = getDescriptor().getMessageTypes().get(45); internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( 
internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor, new java.lang.String[] { "SnapshotName", "SnapshotRoot", "Permission", "Owner", "Group", "CreateTime", }); internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor = getDescriptor().getMessageTypes().get(46); internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor, new java.lang.String[] { "BlockPoolId", "Finalized", }); internal_static_hadoop_hdfs_StorageUuidsProto_descriptor = getDescriptor().getMessageTypes().get(47); internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_StorageUuidsProto_descriptor, new java.lang.String[] { "StorageUuids", }); internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor = getDescriptor().getMessageTypes().get(48); internal_static_hadoop_hdfs_BlockTokenSecretProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor, new java.lang.String[] { "ExpiryDate", "KeyId", "UserId", "BlockPoolId", "BlockId", "Modes", "StorageTypes", "StorageIds", "HandshakeSecret", }); org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(); org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor(); } // @@protoc_insertion_point(outer_class_scope) }




© 2015 - 2024 Weber Informatics LLC | Privacy Policy