
org.apache.hadoop.hdfs.server.namenode.FsImageProto Maven / Gradle / Ivy

There is a newer version: 3.4.0
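FsImageProto bundles the protocol-buffer message classes the HDFS NameNode uses to serialize its namespace image (fsimage). Everything below is code generated from fsimage.proto and must not be edited by hand. As a quick orientation before the listing, here is a minimal round-trip sketch of the generated builder/parser API; the field values are placeholders, and locating the real FileSummary inside an fsimage file is deliberately not shown:

    import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;

    public class FileSummaryRoundTrip {
      public static void main(String[] args) throws Exception {
        // ondiskVersion and layoutVersion are required fields, so build()
        // would throw if either were left unset.
        FileSummary original = FileSummary.newBuilder()
            .setOndiskVersion(1)
            .setLayoutVersion(1)
            .addSections(FileSummary.Section.newBuilder()
                .setName("NS_INFO")   // a section name used by fsimage
                .setOffset(8L)
                .setLength(100L))
            .build();

        // Serialize, then parse back and walk the section index.
        byte[] bytes = original.toByteArray();
        FileSummary parsed = FileSummary.parseFrom(bytes);
        for (FileSummary.Section s : parsed.getSectionsList()) {
          System.out.println(s.getName()
              + " offset=" + s.getOffset() + " length=" + s.getLength());
        }
      }
    }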
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: fsimage.proto

package org.apache.hadoop.hdfs.server.namenode;

public final class FsImageProto {
  private FsImageProto() {}
  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
  }

  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
    registerAllExtensions(
        (org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
  }
  public interface FileSummaryOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.FileSummary)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * The version of the above EBNF grammars.
     *
     * required uint32 ondiskVersion = 1;
     */
    boolean hasOndiskVersion();

    /**
     * The version of the above EBNF grammars.
     *
     * required uint32 ondiskVersion = 1;
     */
    int getOndiskVersion();

    /**
     * layoutVersion describes which features are available in the
     * FSImage.
     *
     * required uint32 layoutVersion = 2;
     */
    boolean hasLayoutVersion();

    /**
     * layoutVersion describes which features are available in the
     * FSImage.
     *
* * required uint32 layoutVersion = 2; */ int getLayoutVersion(); /** * optional string codec = 3; */ boolean hasCodec(); /** * optional string codec = 3; */ java.lang.String getCodec(); /** * optional string codec = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getCodecBytes(); /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ java.util.List getSectionsList(); /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getSections(int index); /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ int getSectionsCount(); /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ java.util.List getSectionsOrBuilderList(); /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder getSectionsOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.fsimage.FileSummary} */ public static final class FileSummary extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.FileSummary) FileSummaryOrBuilder { private static final long serialVersionUID = 0L; // Use FileSummary.newBuilder() to construct. private FileSummary(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private FileSummary() { codec_ = ""; sections_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FileSummary( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; ondiskVersion_ = input.readUInt32(); break; } case 16: { bitField0_ |= 0x00000002; layoutVersion_ = input.readUInt32(); break; } case 26: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000004; codec_ = bs; break; } case 34: { if (!((mutable_bitField0_ & 0x00000008) != 0)) { sections_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000008; } sections_.add( input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.PARSER, extensionRegistry)); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000008) != 0)) { sections_ = java.util.Collections.unmodifiableList(sections_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Builder.class); } public interface SectionOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.FileSummary.Section) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional string name = 1; */ boolean hasName(); /** * optional string name = 1; */ java.lang.String getName(); /** * optional string name = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes(); /** * optional uint64 length = 2; */ boolean hasLength(); /** * optional uint64 length = 2; */ long getLength(); /** * optional uint64 offset = 3; */ boolean hasOffset(); /** * optional uint64 offset = 3; */ long getOffset(); } /** *
     * index for each section
     * 
* * Protobuf type {@code hadoop.hdfs.fsimage.FileSummary.Section} */ public static final class Section extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.FileSummary.Section) SectionOrBuilder { private static final long serialVersionUID = 0L; // Use Section.newBuilder() to construct. private Section(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private Section() { name_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private Section( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; name_ = bs; break; } case 16: { bitField0_ |= 0x00000002; length_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; offset_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder.class); } private int bitField0_; public static final int NAME_FIELD_NUMBER = 1; private volatile java.lang.Object name_; /** * optional string name = 1; */ public boolean hasName() { return ((bitField0_ & 0x00000001) != 0); } /** * optional string name = 1; */ public java.lang.String getName() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } } /** * optional string name = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = 
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int LENGTH_FIELD_NUMBER = 2; private long length_; /** * optional uint64 length = 2; */ public boolean hasLength() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 length = 2; */ public long getLength() { return length_; } public static final int OFFSET_FIELD_NUMBER = 3; private long offset_; /** * optional uint64 offset = 3; */ public boolean hasOffset() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 offset = 3; */ public long getOffset() { return offset_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, name_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, length_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, offset_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, name_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, length_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, offset_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section) obj; if (hasName() != other.hasName()) return false; if (hasName()) { if (!getName() .equals(other.getName())) return false; } if (hasLength() != other.hasLength()) return false; if (hasLength()) { if (getLength() != other.getLength()) return false; } if (hasOffset() != other.hasOffset()) return false; if (hasOffset()) { if (getOffset() != other.getOffset()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } if (hasLength()) { hash = (37 * hash) + LENGTH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLength()); } if (hasOffset()) { hash = (37 * hash) + OFFSET_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getOffset()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
       * index for each section
       * 
* * Protobuf type {@code hadoop.hdfs.fsimage.FileSummary.Section} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.FileSummary.Section) org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); length_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); offset_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.name_ = name_; if (((from_bitField0_ & 0x00000002) != 0)) { result.length_ = length_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.offset_ = offset_; to_bitField0_ |= 0x00000004; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.getDefaultInstance()) return this; if (other.hasName()) { bitField0_ |= 0x00000001; name_ = other.name_; onChanged(); } if (other.hasLength()) { setLength(other.getLength()); } if (other.hasOffset()) { setOffset(other.getOffset()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object name_ = ""; /** * optional string name = 1; */ public boolean hasName() { return ((bitField0_ & 0x00000001) != 0); } /** * optional string name = 1; */ public java.lang.String getName() { java.lang.Object ref = name_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string name = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string name = 1; */ public Builder setName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; name_ = value; onChanged(); return this; } /** * optional string name = 1; */ public Builder clearName() { bitField0_ = (bitField0_ & 
~0x00000001); name_ = getDefaultInstance().getName(); onChanged(); return this; } /** * optional string name = 1; */ public Builder setNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; name_ = value; onChanged(); return this; } private long length_ ; /** * optional uint64 length = 2; */ public boolean hasLength() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 length = 2; */ public long getLength() { return length_; } /** * optional uint64 length = 2; */ public Builder setLength(long value) { bitField0_ |= 0x00000002; length_ = value; onChanged(); return this; } /** * optional uint64 length = 2; */ public Builder clearLength() { bitField0_ = (bitField0_ & ~0x00000002); length_ = 0L; onChanged(); return this; } private long offset_ ; /** * optional uint64 offset = 3; */ public boolean hasOffset() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 offset = 3; */ public long getOffset() { return offset_; } /** * optional uint64 offset = 3; */ public Builder setOffset(long value) { bitField0_ |= 0x00000004; offset_ = value; onChanged(); return this; } /** * optional uint64 offset = 3; */ public Builder clearOffset() { bitField0_ = (bitField0_ & ~0x00000004); offset_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.FileSummary.Section) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.FileSummary.Section) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser
    <Section> PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<Section>() {
      @java.lang.Override
      public Section parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return new Section(input, extensionRegistry);
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<Section> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<Section>
getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private int bitField0_; public static final int ONDISKVERSION_FIELD_NUMBER = 1; private int ondiskVersion_; /** *
     * The version of the above EBNF grammars.
     *
     * required uint32 ondiskVersion = 1;
     */
    public boolean hasOndiskVersion() {
      return ((bitField0_ & 0x00000001) != 0);
    }

    /**
     * The version of the above EBNF grammars.
     *
     * required uint32 ondiskVersion = 1;
     */
    public int getOndiskVersion() {
      return ondiskVersion_;
    }

    public static final int LAYOUTVERSION_FIELD_NUMBER = 2;
    private int layoutVersion_;

    /**
     * layoutVersion describes which features are available in the
     * FSImage.
     *
     * required uint32 layoutVersion = 2;
     */
    public boolean hasLayoutVersion() {
      return ((bitField0_ & 0x00000002) != 0);
    }

    /**
     * layoutVersion describes which features are available in the
     * FSImage.
     *
* * required uint32 layoutVersion = 2; */ public int getLayoutVersion() { return layoutVersion_; } public static final int CODEC_FIELD_NUMBER = 3; private volatile java.lang.Object codec_; /** * optional string codec = 3; */ public boolean hasCodec() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string codec = 3; */ public java.lang.String getCodec() { java.lang.Object ref = codec_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { codec_ = s; } return s; } } /** * optional string codec = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getCodecBytes() { java.lang.Object ref = codec_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); codec_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int SECTIONS_FIELD_NUMBER = 4; private java.util.List sections_; /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public java.util.List getSectionsList() { return sections_; } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public java.util.List getSectionsOrBuilderList() { return sections_; } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public int getSectionsCount() { return sections_.size(); } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getSections(int index) { return sections_.get(index); } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder getSectionsOrBuilder( int index) { return sections_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasOndiskVersion()) { memoizedIsInitialized = 0; return false; } if (!hasLayoutVersion()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, ondiskVersion_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, layoutVersion_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, codec_); } for (int i = 0; i < sections_.size(); i++) { output.writeMessage(4, sections_.get(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, ondiskVersion_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, layoutVersion_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, codec_); } for (int i = 0; i < 
sections_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, sections_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary) obj; if (hasOndiskVersion() != other.hasOndiskVersion()) return false; if (hasOndiskVersion()) { if (getOndiskVersion() != other.getOndiskVersion()) return false; } if (hasLayoutVersion() != other.hasLayoutVersion()) return false; if (hasLayoutVersion()) { if (getLayoutVersion() != other.getLayoutVersion()) return false; } if (hasCodec() != other.hasCodec()) return false; if (hasCodec()) { if (!getCodec() .equals(other.getCodec())) return false; } if (!getSectionsList() .equals(other.getSectionsList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasOndiskVersion()) { hash = (37 * hash) + ONDISKVERSION_FIELD_NUMBER; hash = (53 * hash) + getOndiskVersion(); } if (hasLayoutVersion()) { hash = (37 * hash) + LAYOUTVERSION_FIELD_NUMBER; hash = (53 * hash) + getLayoutVersion(); } if (hasCodec()) { hash = (37 * hash) + CODEC_FIELD_NUMBER; hash = (53 * hash) + getCodec().hashCode(); } if (getSectionsCount() > 0) { hash = (37 * hash) + SECTIONS_FIELD_NUMBER; hash = (53 * hash) + getSectionsList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, 
extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.FileSummary} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.FileSummary) org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummaryOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getSectionsFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); ondiskVersion_ = 0; bitField0_ = (bitField0_ & ~0x00000001); layoutVersion_ = 0; bitField0_ = (bitField0_ & ~0x00000002); codec_ = ""; bitField0_ = (bitField0_ & ~0x00000004); if (sectionsBuilder_ == null) { sections_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); } else { sectionsBuilder_.clear(); } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.ondiskVersion_ = ondiskVersion_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.layoutVersion_ = layoutVersion_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.codec_ = codec_; if (sectionsBuilder_ == null) { if 
(((bitField0_ & 0x00000008) != 0)) { sections_ = java.util.Collections.unmodifiableList(sections_); bitField0_ = (bitField0_ & ~0x00000008); } result.sections_ = sections_; } else { result.sections_ = sectionsBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.getDefaultInstance()) return this; if (other.hasOndiskVersion()) { setOndiskVersion(other.getOndiskVersion()); } if (other.hasLayoutVersion()) { setLayoutVersion(other.getLayoutVersion()); } if (other.hasCodec()) { bitField0_ |= 0x00000004; codec_ = other.codec_; onChanged(); } if (sectionsBuilder_ == null) { if (!other.sections_.isEmpty()) { if (sections_.isEmpty()) { sections_ = other.sections_; bitField0_ = (bitField0_ & ~0x00000008); } else { ensureSectionsIsMutable(); sections_.addAll(other.sections_); } onChanged(); } } else { if (!other.sections_.isEmpty()) { if (sectionsBuilder_.isEmpty()) { sectionsBuilder_.dispose(); sectionsBuilder_ = null; sections_ = other.sections_; bitField0_ = (bitField0_ & ~0x00000008); sectionsBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getSectionsFieldBuilder() : null; } else { sectionsBuilder_.addAllMessages(other.sections_); } } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasOndiskVersion()) { return false; } if (!hasLayoutVersion()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int ondiskVersion_ ; /** *
       * The version of the above EBNF grammars.
       *
       * required uint32 ondiskVersion = 1;
       */
      public boolean hasOndiskVersion() {
        return ((bitField0_ & 0x00000001) != 0);
      }

      /**
       * The version of the above EBNF grammars.
       *
       * required uint32 ondiskVersion = 1;
       */
      public int getOndiskVersion() {
        return ondiskVersion_;
      }

      /**
       * The version of the above EBNF grammars.
       *
       * required uint32 ondiskVersion = 1;
       */
      public Builder setOndiskVersion(int value) {
        bitField0_ |= 0x00000001;
        ondiskVersion_ = value;
        onChanged();
        return this;
      }

      /**
       * The version of the above EBNF grammars.
       *
       * required uint32 ondiskVersion = 1;
       */
      public Builder clearOndiskVersion() {
        bitField0_ = (bitField0_ & ~0x00000001);
        ondiskVersion_ = 0;
        onChanged();
        return this;
      }

      private int layoutVersion_ ;

      /**
       * layoutVersion describes which features are available in the
       * FSImage.
       *
       * required uint32 layoutVersion = 2;
       */
      public boolean hasLayoutVersion() {
        return ((bitField0_ & 0x00000002) != 0);
      }

      /**
       * layoutVersion describes which features are available in the
       * FSImage.
       *
       * required uint32 layoutVersion = 2;
       */
      public int getLayoutVersion() {
        return layoutVersion_;
      }

      /**
       * layoutVersion describes which features are available in the
       * FSImage.
       *
       * required uint32 layoutVersion = 2;
       */
      public Builder setLayoutVersion(int value) {
        bitField0_ |= 0x00000002;
        layoutVersion_ = value;
        onChanged();
        return this;
      }

      /**
       * layoutVersion describes which features are available in the
       * FSImage.
       *
* * required uint32 layoutVersion = 2; */ public Builder clearLayoutVersion() { bitField0_ = (bitField0_ & ~0x00000002); layoutVersion_ = 0; onChanged(); return this; } private java.lang.Object codec_ = ""; /** * optional string codec = 3; */ public boolean hasCodec() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string codec = 3; */ public java.lang.String getCodec() { java.lang.Object ref = codec_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { codec_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string codec = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getCodecBytes() { java.lang.Object ref = codec_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); codec_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string codec = 3; */ public Builder setCodec( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; codec_ = value; onChanged(); return this; } /** * optional string codec = 3; */ public Builder clearCodec() { bitField0_ = (bitField0_ & ~0x00000004); codec_ = getDefaultInstance().getCodec(); onChanged(); return this; } /** * optional string codec = 3; */ public Builder setCodecBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; codec_ = value; onChanged(); return this; } private java.util.List sections_ = java.util.Collections.emptyList(); private void ensureSectionsIsMutable() { if (!((bitField0_ & 0x00000008) != 0)) { sections_ = new java.util.ArrayList(sections_); bitField0_ |= 0x00000008; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder> sectionsBuilder_; /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public java.util.List getSectionsList() { if (sectionsBuilder_ == null) { return java.util.Collections.unmodifiableList(sections_); } else { return sectionsBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public int getSectionsCount() { if (sectionsBuilder_ == null) { return sections_.size(); } else { return sectionsBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getSections(int index) { if (sectionsBuilder_ == null) { return sections_.get(index); } else { return sectionsBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public Builder setSections( int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section value) { if (sectionsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSectionsIsMutable(); sections_.set(index, value); onChanged(); } else { sectionsBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section 
sections = 4; */ public Builder setSections( int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder builderForValue) { if (sectionsBuilder_ == null) { ensureSectionsIsMutable(); sections_.set(index, builderForValue.build()); onChanged(); } else { sectionsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public Builder addSections(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section value) { if (sectionsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSectionsIsMutable(); sections_.add(value); onChanged(); } else { sectionsBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public Builder addSections( int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section value) { if (sectionsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSectionsIsMutable(); sections_.add(index, value); onChanged(); } else { sectionsBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public Builder addSections( org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder builderForValue) { if (sectionsBuilder_ == null) { ensureSectionsIsMutable(); sections_.add(builderForValue.build()); onChanged(); } else { sectionsBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public Builder addSections( int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder builderForValue) { if (sectionsBuilder_ == null) { ensureSectionsIsMutable(); sections_.add(index, builderForValue.build()); onChanged(); } else { sectionsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public Builder addAllSections( java.lang.Iterable values) { if (sectionsBuilder_ == null) { ensureSectionsIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, sections_); onChanged(); } else { sectionsBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public Builder clearSections() { if (sectionsBuilder_ == null) { sections_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); onChanged(); } else { sectionsBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public Builder removeSections(int index) { if (sectionsBuilder_ == null) { ensureSectionsIsMutable(); sections_.remove(index); onChanged(); } else { sectionsBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder getSectionsBuilder( int index) { return getSectionsFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder getSectionsOrBuilder( int index) { if (sectionsBuilder_ == null) { return sections_.get(index); } else { return sectionsBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ 
public java.util.List getSectionsOrBuilderList() { if (sectionsBuilder_ != null) { return sectionsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(sections_); } } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder addSectionsBuilder() { return getSectionsFieldBuilder().addBuilder( org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.getDefaultInstance()); } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder addSectionsBuilder( int index) { return getSectionsFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.getDefaultInstance()); } /** * repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4; */ public java.util.List getSectionsBuilderList() { return getSectionsFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder> getSectionsFieldBuilder() { if (sectionsBuilder_ == null) { sectionsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder>( sections_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); sections_ = null; } return sectionsBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.FileSummary) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.FileSummary) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public FileSummary parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new FileSummary(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } 
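  // Illustrative sketch, not part of the generated source: the delimited
  // helpers above length-prefix each message, so several messages can share
  // one stream. writeDelimitedTo comes from the protobuf MessageLite base
  // class; the file name and field values here are placeholders.
  //
  //   try (java.io.OutputStream out = new java.io.FileOutputStream("summary.bin")) {
  //     FileSummary.newBuilder()
  //         .setOndiskVersion(1)   // required
  //         .setLayoutVersion(1)   // required
  //         .build()
  //         .writeDelimitedTo(out);
  //   }
  //   try (java.io.InputStream in = new java.io.FileInputStream("summary.bin")) {
  //     FileSummary summary = FileSummary.parseDelimitedFrom(in);
  //   }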
public interface NameSystemSectionOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.NameSystemSection) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint32 namespaceId = 1; */ boolean hasNamespaceId(); /** * optional uint32 namespaceId = 1; */ int getNamespaceId(); /** *
     * <pre>
     * legacy generation stamp
     * </pre>
     *
     * <code>optional uint64 genstampV1 = 2;</code>
     */
    boolean hasGenstampV1();

    /**
     * <pre>
     * legacy generation stamp
     * </pre>
     *
     * <code>optional uint64 genstampV1 = 2;</code>
     */
    long getGenstampV1();

    /**
     * <pre>
     * generation stamp of latest version
     * </pre>
     *
     * <code>optional uint64 genstampV2 = 3;</code>
     */
    boolean hasGenstampV2();

    /**
     * <pre>
     * generation stamp of latest version
     * </pre>
     *
* * optional uint64 genstampV2 = 3; */ long getGenstampV2(); /** * optional uint64 genstampV1Limit = 4; */ boolean hasGenstampV1Limit(); /** * optional uint64 genstampV1Limit = 4; */ long getGenstampV1Limit(); /** * optional uint64 lastAllocatedBlockId = 5; */ boolean hasLastAllocatedBlockId(); /** * optional uint64 lastAllocatedBlockId = 5; */ long getLastAllocatedBlockId(); /** * optional uint64 transactionId = 6; */ boolean hasTransactionId(); /** * optional uint64 transactionId = 6; */ long getTransactionId(); /** * optional uint64 rollingUpgradeStartTime = 7; */ boolean hasRollingUpgradeStartTime(); /** * optional uint64 rollingUpgradeStartTime = 7; */ long getRollingUpgradeStartTime(); /** * optional uint64 lastAllocatedStripedBlockId = 8; */ boolean hasLastAllocatedStripedBlockId(); /** * optional uint64 lastAllocatedStripedBlockId = 8; */ long getLastAllocatedStripedBlockId(); } /** *
   * <pre>
   **
   * Name: NS_INFO
   * </pre>
   *
* * Protobuf type {@code hadoop.hdfs.fsimage.NameSystemSection} */ public static final class NameSystemSection extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.NameSystemSection) NameSystemSectionOrBuilder { private static final long serialVersionUID = 0L; // Use NameSystemSection.newBuilder() to construct. private NameSystemSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private NameSystemSection() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private NameSystemSection( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; namespaceId_ = input.readUInt32(); break; } case 16: { bitField0_ |= 0x00000002; genstampV1_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; genstampV2_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; genstampV1Limit_ = input.readUInt64(); break; } case 40: { bitField0_ |= 0x00000010; lastAllocatedBlockId_ = input.readUInt64(); break; } case 48: { bitField0_ |= 0x00000020; transactionId_ = input.readUInt64(); break; } case 56: { bitField0_ |= 0x00000040; rollingUpgradeStartTime_ = input.readUInt64(); break; } case 64: { bitField0_ |= 0x00000080; lastAllocatedStripedBlockId_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.Builder.class); } private int bitField0_; public static final int NAMESPACEID_FIELD_NUMBER = 1; private int namespaceId_; /** * optional uint32 namespaceId = 1; */ public boolean hasNamespaceId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 namespaceId = 1; */ public int getNamespaceId() { return namespaceId_; } public static final int GENSTAMPV1_FIELD_NUMBER = 2; private long 
genstampV1_;
    /**
     * <pre>
     * legacy generation stamp
     * </pre>
     *
     * <code>optional uint64 genstampV1 = 2;</code>
     */
    public boolean hasGenstampV1() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * legacy generation stamp
     * </pre>
     *
     * <code>optional uint64 genstampV1 = 2;</code>
     */
    public long getGenstampV1() {
      return genstampV1_;
    }

    public static final int GENSTAMPV2_FIELD_NUMBER = 3;
    private long genstampV2_;
    /**
     * <pre>
     * generation stamp of latest version
     * </pre>
     *
     * <code>optional uint64 genstampV2 = 3;</code>
     */
    public boolean hasGenstampV2() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <pre>
     * generation stamp of latest version
     * </pre>
     *
* * optional uint64 genstampV2 = 3; */ public long getGenstampV2() { return genstampV2_; } public static final int GENSTAMPV1LIMIT_FIELD_NUMBER = 4; private long genstampV1Limit_; /** * optional uint64 genstampV1Limit = 4; */ public boolean hasGenstampV1Limit() { return ((bitField0_ & 0x00000008) != 0); } /** * optional uint64 genstampV1Limit = 4; */ public long getGenstampV1Limit() { return genstampV1Limit_; } public static final int LASTALLOCATEDBLOCKID_FIELD_NUMBER = 5; private long lastAllocatedBlockId_; /** * optional uint64 lastAllocatedBlockId = 5; */ public boolean hasLastAllocatedBlockId() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 lastAllocatedBlockId = 5; */ public long getLastAllocatedBlockId() { return lastAllocatedBlockId_; } public static final int TRANSACTIONID_FIELD_NUMBER = 6; private long transactionId_; /** * optional uint64 transactionId = 6; */ public boolean hasTransactionId() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 transactionId = 6; */ public long getTransactionId() { return transactionId_; } public static final int ROLLINGUPGRADESTARTTIME_FIELD_NUMBER = 7; private long rollingUpgradeStartTime_; /** * optional uint64 rollingUpgradeStartTime = 7; */ public boolean hasRollingUpgradeStartTime() { return ((bitField0_ & 0x00000040) != 0); } /** * optional uint64 rollingUpgradeStartTime = 7; */ public long getRollingUpgradeStartTime() { return rollingUpgradeStartTime_; } public static final int LASTALLOCATEDSTRIPEDBLOCKID_FIELD_NUMBER = 8; private long lastAllocatedStripedBlockId_; /** * optional uint64 lastAllocatedStripedBlockId = 8; */ public boolean hasLastAllocatedStripedBlockId() { return ((bitField0_ & 0x00000080) != 0); } /** * optional uint64 lastAllocatedStripedBlockId = 8; */ public long getLastAllocatedStripedBlockId() { return lastAllocatedStripedBlockId_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, namespaceId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, genstampV1_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, genstampV2_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, genstampV1Limit_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, lastAllocatedBlockId_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt64(6, transactionId_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeUInt64(7, rollingUpgradeStartTime_); } if (((bitField0_ & 0x00000080) != 0)) { output.writeUInt64(8, lastAllocatedStripedBlockId_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, namespaceId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, genstampV1_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, genstampV2_); } if (((bitField0_ & 0x00000008) != 0)) 
{ size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, genstampV1Limit_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, lastAllocatedBlockId_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(6, transactionId_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(7, rollingUpgradeStartTime_); } if (((bitField0_ & 0x00000080) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(8, lastAllocatedStripedBlockId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection) obj; if (hasNamespaceId() != other.hasNamespaceId()) return false; if (hasNamespaceId()) { if (getNamespaceId() != other.getNamespaceId()) return false; } if (hasGenstampV1() != other.hasGenstampV1()) return false; if (hasGenstampV1()) { if (getGenstampV1() != other.getGenstampV1()) return false; } if (hasGenstampV2() != other.hasGenstampV2()) return false; if (hasGenstampV2()) { if (getGenstampV2() != other.getGenstampV2()) return false; } if (hasGenstampV1Limit() != other.hasGenstampV1Limit()) return false; if (hasGenstampV1Limit()) { if (getGenstampV1Limit() != other.getGenstampV1Limit()) return false; } if (hasLastAllocatedBlockId() != other.hasLastAllocatedBlockId()) return false; if (hasLastAllocatedBlockId()) { if (getLastAllocatedBlockId() != other.getLastAllocatedBlockId()) return false; } if (hasTransactionId() != other.hasTransactionId()) return false; if (hasTransactionId()) { if (getTransactionId() != other.getTransactionId()) return false; } if (hasRollingUpgradeStartTime() != other.hasRollingUpgradeStartTime()) return false; if (hasRollingUpgradeStartTime()) { if (getRollingUpgradeStartTime() != other.getRollingUpgradeStartTime()) return false; } if (hasLastAllocatedStripedBlockId() != other.hasLastAllocatedStripedBlockId()) return false; if (hasLastAllocatedStripedBlockId()) { if (getLastAllocatedStripedBlockId() != other.getLastAllocatedStripedBlockId()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasNamespaceId()) { hash = (37 * hash) + NAMESPACEID_FIELD_NUMBER; hash = (53 * hash) + getNamespaceId(); } if (hasGenstampV1()) { hash = (37 * hash) + GENSTAMPV1_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getGenstampV1()); } if (hasGenstampV2()) { hash = (37 * hash) + GENSTAMPV2_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getGenstampV2()); } if (hasGenstampV1Limit()) { hash = (37 * hash) + GENSTAMPV1LIMIT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getGenstampV1Limit()); } if (hasLastAllocatedBlockId()) { hash = (37 * hash) + LASTALLOCATEDBLOCKID_FIELD_NUMBER; hash = 
(53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLastAllocatedBlockId()); } if (hasTransactionId()) { hash = (37 * hash) + TRANSACTIONID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getTransactionId()); } if (hasRollingUpgradeStartTime()) { hash = (37 * hash) + ROLLINGUPGRADESTARTTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getRollingUpgradeStartTime()); } if (hasLastAllocatedStripedBlockId()) { hash = (37 * hash) + LASTALLOCATEDSTRIPEDBLOCKID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLastAllocatedStripedBlockId()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseDelimitedFrom( java.io.InputStream input, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     * <pre>
     **
     * Name: NS_INFO
     * </pre>
     *
* * Protobuf type {@code hadoop.hdfs.fsimage.NameSystemSection} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.NameSystemSection) org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSectionOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); namespaceId_ = 0; bitField0_ = (bitField0_ & ~0x00000001); genstampV1_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); genstampV2_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); genstampV1Limit_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); lastAllocatedBlockId_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); transactionId_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); rollingUpgradeStartTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000040); lastAllocatedStripedBlockId_ = 0L; bitField0_ = (bitField0_ & ~0x00000080); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.namespaceId_ = namespaceId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.genstampV1_ = genstampV1_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.genstampV2_ = genstampV2_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 
0x00000008) != 0)) { result.genstampV1Limit_ = genstampV1Limit_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.lastAllocatedBlockId_ = lastAllocatedBlockId_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.transactionId_ = transactionId_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { result.rollingUpgradeStartTime_ = rollingUpgradeStartTime_; to_bitField0_ |= 0x00000040; } if (((from_bitField0_ & 0x00000080) != 0)) { result.lastAllocatedStripedBlockId_ = lastAllocatedStripedBlockId_; to_bitField0_ |= 0x00000080; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.getDefaultInstance()) return this; if (other.hasNamespaceId()) { setNamespaceId(other.getNamespaceId()); } if (other.hasGenstampV1()) { setGenstampV1(other.getGenstampV1()); } if (other.hasGenstampV2()) { setGenstampV2(other.getGenstampV2()); } if (other.hasGenstampV1Limit()) { setGenstampV1Limit(other.getGenstampV1Limit()); } if (other.hasLastAllocatedBlockId()) { setLastAllocatedBlockId(other.getLastAllocatedBlockId()); } if (other.hasTransactionId()) { setTransactionId(other.getTransactionId()); } if (other.hasRollingUpgradeStartTime()) { setRollingUpgradeStartTime(other.getRollingUpgradeStartTime()); } if (other.hasLastAllocatedStripedBlockId()) { setLastAllocatedStripedBlockId(other.getLastAllocatedStripedBlockId()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = 
(org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int namespaceId_ ; /** * optional uint32 namespaceId = 1; */ public boolean hasNamespaceId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 namespaceId = 1; */ public int getNamespaceId() { return namespaceId_; } /** * optional uint32 namespaceId = 1; */ public Builder setNamespaceId(int value) { bitField0_ |= 0x00000001; namespaceId_ = value; onChanged(); return this; } /** * optional uint32 namespaceId = 1; */ public Builder clearNamespaceId() { bitField0_ = (bitField0_ & ~0x00000001); namespaceId_ = 0; onChanged(); return this; } private long genstampV1_ ; /** *
       * <pre>
       * legacy generation stamp
       * </pre>
       *
       * <code>optional uint64 genstampV1 = 2;</code>
       */
      public boolean hasGenstampV1() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * legacy generation stamp
       * </pre>
       *
       * <code>optional uint64 genstampV1 = 2;</code>
       */
      public long getGenstampV1() {
        return genstampV1_;
      }
      /**
       * <pre>
       * legacy generation stamp
       * </pre>
       *
       * <code>optional uint64 genstampV1 = 2;</code>
       */
      public Builder setGenstampV1(long value) {
        bitField0_ |= 0x00000002;
        genstampV1_ = value;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * legacy generation stamp
       * </pre>
       *
       * <code>optional uint64 genstampV1 = 2;</code>
       */
      public Builder clearGenstampV1() {
        bitField0_ = (bitField0_ & ~0x00000002);
        genstampV1_ = 0L;
        onChanged();
        return this;
      }

      private long genstampV2_ ;
      /**
       * <pre>
       * generation stamp of latest version
       * </pre>
       *
       * <code>optional uint64 genstampV2 = 3;</code>
       */
      public boolean hasGenstampV2() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * generation stamp of latest version
       * </pre>
       *
       * <code>optional uint64 genstampV2 = 3;</code>
       */
      public long getGenstampV2() {
        return genstampV2_;
      }
      /**
       * <pre>
       * generation stamp of latest version
       * </pre>
       *
       * <code>optional uint64 genstampV2 = 3;</code>
       */
      public Builder setGenstampV2(long value) {
        bitField0_ |= 0x00000004;
        genstampV2_ = value;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * generation stamp of latest version
       * </pre>
       *
* * optional uint64 genstampV2 = 3; */ public Builder clearGenstampV2() { bitField0_ = (bitField0_ & ~0x00000004); genstampV2_ = 0L; onChanged(); return this; } private long genstampV1Limit_ ; /** * optional uint64 genstampV1Limit = 4; */ public boolean hasGenstampV1Limit() { return ((bitField0_ & 0x00000008) != 0); } /** * optional uint64 genstampV1Limit = 4; */ public long getGenstampV1Limit() { return genstampV1Limit_; } /** * optional uint64 genstampV1Limit = 4; */ public Builder setGenstampV1Limit(long value) { bitField0_ |= 0x00000008; genstampV1Limit_ = value; onChanged(); return this; } /** * optional uint64 genstampV1Limit = 4; */ public Builder clearGenstampV1Limit() { bitField0_ = (bitField0_ & ~0x00000008); genstampV1Limit_ = 0L; onChanged(); return this; } private long lastAllocatedBlockId_ ; /** * optional uint64 lastAllocatedBlockId = 5; */ public boolean hasLastAllocatedBlockId() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 lastAllocatedBlockId = 5; */ public long getLastAllocatedBlockId() { return lastAllocatedBlockId_; } /** * optional uint64 lastAllocatedBlockId = 5; */ public Builder setLastAllocatedBlockId(long value) { bitField0_ |= 0x00000010; lastAllocatedBlockId_ = value; onChanged(); return this; } /** * optional uint64 lastAllocatedBlockId = 5; */ public Builder clearLastAllocatedBlockId() { bitField0_ = (bitField0_ & ~0x00000010); lastAllocatedBlockId_ = 0L; onChanged(); return this; } private long transactionId_ ; /** * optional uint64 transactionId = 6; */ public boolean hasTransactionId() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 transactionId = 6; */ public long getTransactionId() { return transactionId_; } /** * optional uint64 transactionId = 6; */ public Builder setTransactionId(long value) { bitField0_ |= 0x00000020; transactionId_ = value; onChanged(); return this; } /** * optional uint64 transactionId = 6; */ public Builder clearTransactionId() { bitField0_ = (bitField0_ & ~0x00000020); transactionId_ = 0L; onChanged(); return this; } private long rollingUpgradeStartTime_ ; /** * optional uint64 rollingUpgradeStartTime = 7; */ public boolean hasRollingUpgradeStartTime() { return ((bitField0_ & 0x00000040) != 0); } /** * optional uint64 rollingUpgradeStartTime = 7; */ public long getRollingUpgradeStartTime() { return rollingUpgradeStartTime_; } /** * optional uint64 rollingUpgradeStartTime = 7; */ public Builder setRollingUpgradeStartTime(long value) { bitField0_ |= 0x00000040; rollingUpgradeStartTime_ = value; onChanged(); return this; } /** * optional uint64 rollingUpgradeStartTime = 7; */ public Builder clearRollingUpgradeStartTime() { bitField0_ = (bitField0_ & ~0x00000040); rollingUpgradeStartTime_ = 0L; onChanged(); return this; } private long lastAllocatedStripedBlockId_ ; /** * optional uint64 lastAllocatedStripedBlockId = 8; */ public boolean hasLastAllocatedStripedBlockId() { return ((bitField0_ & 0x00000080) != 0); } /** * optional uint64 lastAllocatedStripedBlockId = 8; */ public long getLastAllocatedStripedBlockId() { return lastAllocatedStripedBlockId_; } /** * optional uint64 lastAllocatedStripedBlockId = 8; */ public Builder setLastAllocatedStripedBlockId(long value) { bitField0_ |= 0x00000080; lastAllocatedStripedBlockId_ = value; onChanged(); return this; } /** * optional uint64 lastAllocatedStripedBlockId = 8; */ public Builder clearLastAllocatedStripedBlockId() { bitField0_ = (bitField0_ & ~0x00000080); lastAllocatedStripedBlockId_ = 0L; onChanged(); return this; } @java.lang.Override 
public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.NameSystemSection) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.NameSystemSection) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public NameSystemSection parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new NameSystemSection(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface INodeSectionOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint64 lastInodeId = 1; */ boolean hasLastInodeId(); /** * optional uint64 lastInodeId = 1; */ long getLastInodeId(); /** *
     * <pre>
     * number of repeated INodes that follow
     * </pre>
     *
     * <code>optional uint64 numInodes = 2;</code>
     */
    boolean hasNumInodes();

    /**
     * <pre>
     * number of repeated INodes that follow
     * </pre>
     *
     * <code>optional uint64 numInodes = 2;</code>
     */
    long getNumInodes();
  }

  /**
   * <pre>
   **
   * Permission is serialized as a 64-bit long: [0:24):[24:48):[48:64) (in Big Endian).
   * The first and the second parts are the string ids of the user and
   * group name, and the last 16 bits are the permission bits.
   * Name: INODE
   * </pre>
   *
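   * For illustration only (hypothetical string ids, not taken from a real
   * image): with user string id 1, group string id 2 and mode 0644, the
   * packed value would be (1L << 40) | (2L << 16) | 0644, since the user id
   * occupies bits [0:24), the group id bits [24:48), and the permission
   * bits [48:64).
   *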
* * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection} */ public static final class INodeSection extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection) INodeSectionOrBuilder { private static final long serialVersionUID = 0L; // Use INodeSection.newBuilder() to construct. private INodeSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private INodeSection() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private INodeSection( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; lastInodeId_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; numInodes_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.Builder.class); } public interface FileUnderConstructionFeatureOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional string clientName = 1; */ boolean hasClientName(); /** * optional string clientName = 1; */ java.lang.String getClientName(); /** * optional string clientName = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getClientNameBytes(); /** * optional string clientMachine = 2; */ boolean hasClientMachine(); /** * optional string clientMachine = 2; */ java.lang.String getClientMachine(); /** * optional string clientMachine = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getClientMachineBytes(); } /** *
      * <pre>
      **
      * under-construction feature for INodeFile
      * </pre>
      *
* * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature} */ public static final class FileUnderConstructionFeature extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature) FileUnderConstructionFeatureOrBuilder { private static final long serialVersionUID = 0L; // Use FileUnderConstructionFeature.newBuilder() to construct. private FileUnderConstructionFeature(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private FileUnderConstructionFeature() { clientName_ = ""; clientMachine_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FileUnderConstructionFeature( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; clientName_ = bs; break; } case 18: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; clientMachine_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder.class); } private int bitField0_; public static final int CLIENTNAME_FIELD_NUMBER = 1; private volatile java.lang.Object clientName_; /** * optional string clientName = 1; */ public boolean hasClientName() { return ((bitField0_ & 0x00000001) != 0); } /** * optional string clientName = 1; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; 
java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientName_ = s; } return s; } } /** * optional string clientName = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int CLIENTMACHINE_FIELD_NUMBER = 2; private volatile java.lang.Object clientMachine_; /** * optional string clientMachine = 2; */ public boolean hasClientMachine() { return ((bitField0_ & 0x00000002) != 0); } /** * optional string clientMachine = 2; */ public java.lang.String getClientMachine() { java.lang.Object ref = clientMachine_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientMachine_ = s; } return s; } } /** * optional string clientMachine = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getClientMachineBytes() { java.lang.Object ref = clientMachine_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientMachine_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, clientName_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, clientMachine_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, clientName_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, clientMachine_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature) obj; if (hasClientName() != other.hasClientName()) return false; if (hasClientName()) { if (!getClientName() .equals(other.getClientName())) return false; } if (hasClientMachine() != other.hasClientMachine()) return false; if (hasClientMachine()) { if (!getClientMachine() 
.equals(other.getClientMachine())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasClientName()) { hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; hash = (53 * hash) + getClientName().hashCode(); } if (hasClientMachine()) { hash = (37 * hash) + CLIENTMACHINE_FIELD_NUMBER; hash = (53 * hash) + getClientMachine().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature 
parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
       * <pre>
       **
       * under-construction feature for INodeFile
       * </pre>
       *
* * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); clientName_ = ""; bitField0_ = (bitField0_ & ~0x00000001); clientMachine_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.clientName_ = clientName_; if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.clientMachine_ = clientMachine_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } 
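      // Usage sketch (illustrative; the client values below are hypothetical):
      // an open file records its lease holder and the holder's machine through
      // this builder.
      //
      //   FsImageProto.INodeSection.FileUnderConstructionFeature fuc =
      //       FsImageProto.INodeSection.FileUnderConstructionFeature.newBuilder()
      //           .setClientName("DFSClient_NONMAPREDUCE_-123456789_1")
      //           .setClientMachine("192.168.1.10")
      //           .build();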
@java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance()) return this; if (other.hasClientName()) { bitField0_ |= 0x00000001; clientName_ = other.clientName_; onChanged(); } if (other.hasClientMachine()) { bitField0_ |= 0x00000002; clientMachine_ = other.clientMachine_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object clientName_ = ""; /** * optional string clientName = 1; */ public boolean hasClientName() { return ((bitField0_ & 0x00000001) != 0); } /** * optional string clientName = 1; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientName_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string clientName = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = 
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string clientName = 1; */ public Builder setClientName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; clientName_ = value; onChanged(); return this; } /** * optional string clientName = 1; */ public Builder clearClientName() { bitField0_ = (bitField0_ & ~0x00000001); clientName_ = getDefaultInstance().getClientName(); onChanged(); return this; } /** * optional string clientName = 1; */ public Builder setClientNameBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; clientName_ = value; onChanged(); return this; } private java.lang.Object clientMachine_ = ""; /** * optional string clientMachine = 2; */ public boolean hasClientMachine() { return ((bitField0_ & 0x00000002) != 0); } /** * optional string clientMachine = 2; */ public java.lang.String getClientMachine() { java.lang.Object ref = clientMachine_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientMachine_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string clientMachine = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getClientMachineBytes() { java.lang.Object ref = clientMachine_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientMachine_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string clientMachine = 2; */ public Builder setClientMachine( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; clientMachine_ = value; onChanged(); return this; } /** * optional string clientMachine = 2; */ public Builder clearClientMachine() { bitField0_ = (bitField0_ & ~0x00000002); clientMachine_ = getDefaultInstance().getClientMachine(); onChanged(); return this; } /** * optional string clientMachine = 2; */ public Builder setClientMachineBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; clientMachine_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature(); } public static 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public FileUnderConstructionFeature parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new FileUnderConstructionFeature(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface AclFeatureProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.AclFeatureProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
       **
       * An ACL entry is represented by a 32-bit integer in Big Endian
        * format. The bits can be divided into five segments:
        * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
        * [0:2) -- reserved for future use.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * 
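        *
        * A minimal decoding sketch (illustrative only; this helper is not part
        * of the generated API), assuming bit 0 above is the most significant
        * bit of the integer:
        *
        *   static int[] decodeAclEntry(int entry) {
        *     int nameId = (entry >>> 6) & 0xFFFFFF; // [2:26)  StringTable ID
        *     int scope  = (entry >>> 5) & 0x1;      // [26:27) AclEntryScopeProto
        *     int type   = (entry >>> 3) & 0x3;      // [27:29) AclEntryTypeProto
        *     int perm   =  entry        & 0x7;      // [29:32) FsActionProto
        *     return new int[] { nameId, scope, type, perm };
        *   }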
* * repeated fixed32 entries = 2 [packed = true]; */ java.util.List getEntriesList(); /** *
       **
       * An ACL entry is represented by a 32-bit integer in Big Endian
        * format. The bits can be divided into five segments:
        * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
        * [0:2) -- reserved for future use.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * 
* * repeated fixed32 entries = 2 [packed = true]; */ int getEntriesCount(); /** *
       **
       * An ACL entry is represented by a 32-bit integer in Big Endian
        * format. The bits can be divided into five segments:
        * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
        * [0:2) -- reserved for future use.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * 
* * repeated fixed32 entries = 2 [packed = true]; */ int getEntries(int index); } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.AclFeatureProto} */ public static final class AclFeatureProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.AclFeatureProto) AclFeatureProtoOrBuilder { private static final long serialVersionUID = 0L; // Use AclFeatureProto.newBuilder() to construct. private AclFeatureProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private AclFeatureProto() { entries_ = emptyIntList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AclFeatureProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 21: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { entries_ = newIntList(); mutable_bitField0_ |= 0x00000001; } entries_.addInt(input.readFixed32()); break; } case 18: { int length = input.readRawVarint32(); int limit = input.pushLimit(length); if (!((mutable_bitField0_ & 0x00000001) != 0) && input.getBytesUntilLimit() > 0) { entries_ = newIntList(); mutable_bitField0_ |= 0x00000001; } while (input.getBytesUntilLimit() > 0) { entries_.addInt(input.readFixed32()); } input.popLimit(limit); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { entries_.makeImmutable(); // C } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder.class); } public static final int ENTRIES_FIELD_NUMBER = 2; private org.apache.hadoop.thirdparty.protobuf.Internal.IntList entries_; /** *
       **
       * An ACL entry is represented by a 32-bit integer in Big Endian
        * format. The bits can be divided into five segments:
        * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
        * [0:2) -- reserved for future use.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * 
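        *
        * Because the field is declared [packed = true], the generated parser
        * above accepts both wire encodings for field 2: a single
        * length-delimited block of back-to-back 4-byte little-endian words
        * (tag byte 0x12, i.e. case 18), and, for compatibility, individually
        * tagged fixed32 values (tag byte 0x15, i.e. case 21, wire type 5).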
* * repeated fixed32 entries = 2 [packed = true]; */ public java.util.List getEntriesList() { return entries_; } /** *
       **
       * An ACL entry is represented by a 32-bit integer in Big Endian
        * format. The bits can be divided into five segments:
        * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
        * [0:2) -- reserved for future use.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * 
* * repeated fixed32 entries = 2 [packed = true]; */ public int getEntriesCount() { return entries_.size(); } /** *
       **
       * An ACL entry is represented by a 32-bit integer in Big Endian
        * format. The bits can be divided into five segments:
        * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
        * [0:2) -- reserved for future use.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * 
* * repeated fixed32 entries = 2 [packed = true]; */ public int getEntries(int index) { return entries_.getInt(index); } private int entriesMemoizedSerializedSize = -1; private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (getEntriesList().size() > 0) { output.writeUInt32NoTag(18); output.writeUInt32NoTag(entriesMemoizedSerializedSize); } for (int i = 0; i < entries_.size(); i++) { output.writeFixed32NoTag(entries_.getInt(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; { int dataSize = 0; dataSize = 4 * getEntriesList().size(); size += dataSize; if (!getEntriesList().isEmpty()) { size += 1; size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt32SizeNoTag(dataSize); } entriesMemoizedSerializedSize = dataSize; } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto) obj; if (!getEntriesList() .equals(other.getEntriesList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getEntriesCount() > 0) { hash = (37 * hash) + ENTRIES_FIELD_NUMBER; hash = (53 * hash) + getEntriesList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto 
parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.AclFeatureProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.AclFeatureProto) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); entries_ = emptyIntList(); bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto(this); int from_bitField0_ = bitField0_; if (((bitField0_ & 0x00000001) != 0)) { entries_.makeImmutable(); bitField0_ = (bitField0_ & ~0x00000001); } result.entries_ = entries_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); 
} @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance()) return this; if (!other.entries_.isEmpty()) { if (entries_.isEmpty()) { entries_ = other.entries_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureEntriesIsMutable(); entries_.addAll(other.entries_); } onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.Internal.IntList entries_ = emptyIntList(); private void ensureEntriesIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { entries_ = mutableCopy(entries_); bitField0_ |= 0x00000001; } } /** *
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
          * format. The bits can be divided into five segments:
          * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
          * [0:2) -- reserved for future use.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * 
* * repeated fixed32 entries = 2 [packed = true]; */ public java.util.List getEntriesList() { return ((bitField0_ & 0x00000001) != 0) ? java.util.Collections.unmodifiableList(entries_) : entries_; } /** *
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
          * format. The bits can be divided into five segments:
          * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
          * [0:2) -- reserved for future use.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * 
* * repeated fixed32 entries = 2 [packed = true]; */ public int getEntriesCount() { return entries_.size(); } /** *
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
          * format. The bits can be divided into five segments:
          * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
          * [0:2) -- reserved for future use.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * 
* * repeated fixed32 entries = 2 [packed = true]; */ public int getEntries(int index) { return entries_.getInt(index); } /** *
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
          * format. The bits can be divided into five segments:
          * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
          * [0:2) -- reserved for future use.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * 
* * repeated fixed32 entries = 2 [packed = true]; */ public Builder setEntries( int index, int value) { ensureEntriesIsMutable(); entries_.setInt(index, value); onChanged(); return this; } /** *
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
          * format. The bits can be divided into five segments:
          * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
          * [0:2) -- reserved for future use.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * 
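          *
          * Round-trip usage sketch (illustrative; the entry value below is a
          * placeholder, not a meaningful ACL entry):
          *
          *   INodeSection.AclFeatureProto acl = INodeSection.AclFeatureProto
          *       .newBuilder()
          *       .addEntries(0x42)   // one packed entry, see layout above
          *       .build();
          *   INodeSection.AclFeatureProto copy =
          *       INodeSection.AclFeatureProto.parseFrom(acl.toByteArray());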
* * repeated fixed32 entries = 2 [packed = true]; */ public Builder addEntries(int value) { ensureEntriesIsMutable(); entries_.addInt(value); onChanged(); return this; } /** *
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
          * format. The bits can be divided into five segments:
          * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
          * [0:2) -- reserved for future use.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * 
* * repeated fixed32 entries = 2 [packed = true]; */ public Builder addAllEntries( java.lang.Iterable values) { ensureEntriesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, entries_); onChanged(); return this; } /** *
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
          * format. The bits can be divided into five segments:
          * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
          * [0:2) -- reserved for future use.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * 
* * repeated fixed32 entries = 2 [packed = true]; */ public Builder clearEntries() { entries_ = emptyIntList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.AclFeatureProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.AclFeatureProto) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public AclFeatureProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new AclFeatureProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface XAttrCompactProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
       **
        * The 32-bit "name" field encodes the XAttr's namespace and name ID.
        * Its bits can be divided into four segments:
       * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- namespace extension. Originally there were only 4 namespaces
       * so only 2 bits were needed. At that time, this bit was reserved. When a
       * 5th namespace was created (raw) this bit became used as a 3rd namespace
       * bit.
        * [27:32) -- reserved for future use.
       * 
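        *
        * Decoding sketch (hypothetical helpers, not generated methods),
        * assuming bit 0 above is the most significant bit:
        *
        *   static int xAttrNamespace(int name) {
        *     int ns  = (name >>> 30) & 0x3;   // [0:2)   base namespace bits
        *     int ext = (name >>> 5)  & 0x1;   // [26:27) 3rd namespace bit
        *     return (ext << 2) | ns;          // 3-bit namespace ordinal
        *   }
        *
        *   static int xAttrNameId(int name) {
        *     return (name >>> 6) & 0xFFFFFF;  // [2:26)  StringTable ID
        *   }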
* * required fixed32 name = 1; */ boolean hasName(); /** *
       **
        * The 32-bit "name" field encodes the XAttr's namespace and name ID.
        * Its bits can be divided into four segments:
       * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- namespace extension. Originally there were only 4 namespaces
       * so only 2 bits were needed. At that time, this bit was reserved. When a
       * 5th namespace was created (raw) this bit became used as a 3rd namespace
       * bit.
        * [27:32) -- reserved for future use.
       * 
* * required fixed32 name = 1; */ int getName(); /** * optional bytes value = 2; */ boolean hasValue(); /** * optional bytes value = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getValue(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto} */ public static final class XAttrCompactProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto) XAttrCompactProtoOrBuilder { private static final long serialVersionUID = 0L; // Use XAttrCompactProto.newBuilder() to construct. private XAttrCompactProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private XAttrCompactProto() { value_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private XAttrCompactProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 13: { bitField0_ |= 0x00000001; name_ = input.readFixed32(); break; } case 18: { bitField0_ |= 0x00000002; value_ = input.readBytes(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder.class); } private int bitField0_; public static final int NAME_FIELD_NUMBER = 1; private int name_; /** *
       **
        * The 32-bit "name" field encodes the XAttr's namespace and name ID.
        * Its bits can be divided into four segments:
       * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- namespace extension. Originally there were only 4 namespaces
       * so only 2 bits were needed. At that time, this bit was reserved. When a
       * 5th namespace was created (raw) this bit became used as a 3rd namespace
       * bit.
        * [27:32) -- reserved for future use.
       * 
* * required fixed32 name = 1; */ public boolean hasName() { return ((bitField0_ & 0x00000001) != 0); } /** *
       **
        * The 32-bit "name" field encodes the XAttr's namespace and name ID.
        * Its bits can be divided into four segments:
       * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- namespace extension. Originally there were only 4 namespaces
       * so only 2 bits were needed. At that time, this bit was reserved. When a
       * 5th namespace was created (raw) this bit became used as a 3rd namespace
       * bit.
        * [27:32) -- reserved for future use.
       * 
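        *
        * Because "name" is a required field, building a message without
        * setting it fails (illustrative):
        *
        *   INodeSection.XAttrCompactProto.newBuilder().build();
        *   // throws UninitializedMessageException (required field "name" unset)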
* * required fixed32 name = 1; */ public int getName() { return name_; } public static final int VALUE_FIELD_NUMBER = 2; private org.apache.hadoop.thirdparty.protobuf.ByteString value_; /** * optional bytes value = 2; */ public boolean hasValue() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bytes value = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getValue() { return value_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeFixed32(1, name_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBytes(2, value_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeFixed32Size(1, name_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(2, value_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto) obj; if (hasName() != other.hasName()) return false; if (hasName()) { if (getName() != other.getName()) return false; } if (hasValue() != other.hasValue()) return false; if (hasValue()) { if (!getValue() .equals(other.getValue())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName(); } if (hasValue()) { hash = (37 * hash) + VALUE_FIELD_NUMBER; hash = (53 * hash) + getValue().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); name_ = 0; bitField0_ = (bitField0_ & ~0x00000001); value_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.name_ = name_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.value_ = value_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } 
@java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.getDefaultInstance()) return this; if (other.hasName()) { setName(other.getName()); } if (other.hasValue()) { setValue(other.getValue()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasName()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int name_ ; /** *
         **
          * The 32-bit "name" field encodes the XAttr's namespace and name ID.
          * Its bits can be divided into four segments:
         * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- namespace extension. Originally there were only 4 namespaces
         * so only 2 bits were needed. At that time, this bit was reserved. When a
         * 5th namespace was created (raw) this bit became used as a 3rd namespace
         * bit.
          * [27:32) -- reserved for future use.
         * 
* * required fixed32 name = 1; */ public boolean hasName() { return ((bitField0_ & 0x00000001) != 0); } /** *
         **
          * The 32-bit "name" field encodes the XAttr's namespace and name ID.
          * Its bits can be divided into four segments:
         * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- namespace extension. Originally there were only 4 namespaces
         * so only 2 bits were needed. At that time, this bit was reserved. When a
         * 5th namespace was created (raw) this bit became used as a 3rd namespace
         * bit.
          * [27:32) -- reserved for future use.
         * 
* * required fixed32 name = 1; */ public int getName() { return name_; } /** *
         **
          * The 32-bit "name" field encodes the XAttr's namespace and name ID.
          * Its bits can be divided into four segments:
         * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- namespace extension. Originally there were only 4 namespaces
         * so only 2 bits were needed. At that time, this bit was reserved. When a
         * 5th namespace was created (raw) this bit became used as a 3rd namespace
         * bit.
          * [27:32) -- reserved for future use.
         * 
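          *
          * Usage sketch (illustrative; packedName is a placeholder for a value
          * packed per the layout above, and ByteString refers to the relocated
          * org.apache.hadoop.thirdparty.protobuf.ByteString):
          *
          *   INodeSection.XAttrCompactProto xattr =
          *       INodeSection.XAttrCompactProto.newBuilder()
          *           .setName(packedName)
          *           .setValue(ByteString.copyFromUtf8("value"))
          *           .build();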
* * required fixed32 name = 1; */ public Builder setName(int value) { bitField0_ |= 0x00000001; name_ = value; onChanged(); return this; } /** *
         **
          * The 32-bit "name" field encodes the XAttr's namespace and name ID.
          * Its bits can be divided into four segments:
         * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- namespace extension. Originally there were only 4 namespaces
         * so only 2 bits were needed. At that time, this bit was reserved. When a
         * 5th namespace was created (raw) this bit became used as a 3rd namespace
         * bit.
          * [27:32) -- reserved for future use.
         * 
* * required fixed32 name = 1; */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000001); name_ = 0; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString value_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes value = 2; */ public boolean hasValue() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bytes value = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getValue() { return value_; } /** * optional bytes value = 2; */ public Builder setValue(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; value_ = value; onChanged(); return this; } /** * optional bytes value = 2; */ public Builder clearValue() { bitField0_ = (bitField0_ & ~0x00000002); value_ = getDefaultInstance().getValue(); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public XAttrCompactProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new XAttrCompactProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface XAttrFeatureProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ java.util.List getXAttrsList(); /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getXAttrs(int index); /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ int getXAttrsCount(); /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ java.util.List getXAttrsOrBuilderList(); /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder getXAttrsOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto} */ public static final class XAttrFeatureProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto) XAttrFeatureProtoOrBuilder { private static final long serialVersionUID = 0L; // Use XAttrFeatureProto.newBuilder() to construct. private XAttrFeatureProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private XAttrFeatureProto() { xAttrs_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private XAttrFeatureProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { xAttrs_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } xAttrs_.add( input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.PARSER, extensionRegistry)); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { xAttrs_ = java.util.Collections.unmodifiableList(xAttrs_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder.class); } public static final int XATTRS_FIELD_NUMBER = 1; private java.util.List xAttrs_; /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public java.util.List getXAttrsList() { return xAttrs_; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public java.util.List getXAttrsOrBuilderList() { return xAttrs_; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto 
xAttrs = 1; */ public int getXAttrsCount() { return xAttrs_.size(); } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getXAttrs(int index) { return xAttrs_.get(index); } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder getXAttrsOrBuilder( int index) { return xAttrs_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; for (int i = 0; i < getXAttrsCount(); i++) { if (!getXAttrs(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < xAttrs_.size(); i++) { output.writeMessage(1, xAttrs_.get(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < xAttrs_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, xAttrs_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto) obj; if (!getXAttrsList() .equals(other.getXAttrsList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getXAttrsCount() > 0) { hash = (37 * hash) + XATTRS_FIELD_NUMBER; hash = (53 * hash) + getXAttrsList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite 
extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
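/*
 * Editor's example (not generated code): the parseFrom overloads above accept
 * ByteBuffer, ByteString, byte[], InputStream and CodedInputStream sources, and
 * all of them funnel into PARSER. A minimal round trip, assuming some
 * already-built message `xattrs` (the variable name is hypothetical):
 *
 *   byte[] bytes = xattrs.toByteArray();
 *   org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto copy =
 *       org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.parseFrom(bytes);
 *   assert copy.equals(xattrs); // value equality, as defined by equals() above
 */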
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getXAttrsFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (xAttrsBuilder_ == null) { xAttrs_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { xAttrsBuilder_.clear(); } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto(this); int from_bitField0_ = bitField0_; if (xAttrsBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { xAttrs_ = java.util.Collections.unmodifiableList(xAttrs_); bitField0_ = (bitField0_ & ~0x00000001); } result.xAttrs_ = xAttrs_; } else { result.xAttrs_ = xAttrsBuilder_.build(); } onBuilt(); return
result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance()) return this; if (xAttrsBuilder_ == null) { if (!other.xAttrs_.isEmpty()) { if (xAttrs_.isEmpty()) { xAttrs_ = other.xAttrs_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureXAttrsIsMutable(); xAttrs_.addAll(other.xAttrs_); } onChanged(); } } else { if (!other.xAttrs_.isEmpty()) { if (xAttrsBuilder_.isEmpty()) { xAttrsBuilder_.dispose(); xAttrsBuilder_ = null; xAttrs_ = other.xAttrs_; bitField0_ = (bitField0_ & ~0x00000001); xAttrsBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
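/*
 * Editor's example (not generated code): mergeFrom() above concatenates the
 * repeated xAttrs field rather than replacing it, per standard protobuf merge
 * semantics. Sketch, with `a` and `b` hypothetical XAttrFeatureProto instances:
 *
 *   org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto merged =
 *       org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.newBuilder()
 *           .mergeFrom(a).mergeFrom(b).build();
 *   // merged.getXAttrsCount() == a.getXAttrsCount() + b.getXAttrsCount()
 */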
getXAttrsFieldBuilder() : null; } else { xAttrsBuilder_.addAllMessages(other.xAttrs_); } } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { for (int i = 0; i < getXAttrsCount(); i++) { if (!getXAttrs(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> xAttrs_ = java.util.Collections.emptyList(); private void ensureXAttrsIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { xAttrs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto>(xAttrs_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder> xAttrsBuilder_; /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> getXAttrsList() { if (xAttrsBuilder_ == null) { return java.util.Collections.unmodifiableList(xAttrs_); } else { return xAttrsBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public int getXAttrsCount() { if (xAttrsBuilder_ == null) { return xAttrs_.size(); } else { return xAttrsBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getXAttrs(int index) { if (xAttrsBuilder_ == null) { return xAttrs_.get(index); } else { return xAttrsBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public Builder setXAttrs( int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto value) { if (xAttrsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureXAttrsIsMutable(); xAttrs_.set(index, value); onChanged(); } else { xAttrsBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public Builder setXAttrs( int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder builderForValue) { if (xAttrsBuilder_ == null) { ensureXAttrsIsMutable(); xAttrs_.set(index, builderForValue.build()); onChanged(); } else { xAttrsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public Builder addXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto value) { if (xAttrsBuilder_ == null) { if (value == null) { throw
new NullPointerException(); } ensureXAttrsIsMutable(); xAttrs_.add(value); onChanged(); } else { xAttrsBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public Builder addXAttrs( int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto value) { if (xAttrsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureXAttrsIsMutable(); xAttrs_.add(index, value); onChanged(); } else { xAttrsBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public Builder addXAttrs( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder builderForValue) { if (xAttrsBuilder_ == null) { ensureXAttrsIsMutable(); xAttrs_.add(builderForValue.build()); onChanged(); } else { xAttrsBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public Builder addXAttrs( int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder builderForValue) { if (xAttrsBuilder_ == null) { ensureXAttrsIsMutable(); xAttrs_.add(index, builderForValue.build()); onChanged(); } else { xAttrsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public Builder addAllXAttrs( java.lang.Iterable<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> values) { if (xAttrsBuilder_ == null) { ensureXAttrsIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, xAttrs_); onChanged(); } else { xAttrsBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public Builder clearXAttrs() { if (xAttrsBuilder_ == null) { xAttrs_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { xAttrsBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public Builder removeXAttrs(int index) { if (xAttrsBuilder_ == null) { ensureXAttrsIsMutable(); xAttrs_.remove(index); onChanged(); } else { xAttrsBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder getXAttrsBuilder( int index) { return getXAttrsFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder getXAttrsOrBuilder( int index) { if (xAttrsBuilder_ == null) { return xAttrs_.get(index); } else { return xAttrsBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder> getXAttrsOrBuilderList() { if (xAttrsBuilder_ != null) { return xAttrsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(xAttrs_); } } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder addXAttrsBuilder() { return getXAttrsFieldBuilder().addBuilder(
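/*
 * Editor's example (not generated code): the mutators above keep either the
 * inline list or the RepeatedFieldBuilderV3 up to date, whichever is active.
 * Typical use, with `entry` a hypothetical, fully-populated XAttrCompactProto:
 *
 *   org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder fb =
 *       org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.newBuilder();
 *   fb.addXAttrs(entry);                      // append an existing message
 *   fb.removeXAttrs(fb.getXAttrsCount() - 1); // or drop the last one again
 */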
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder addXAttrsBuilder( int index) { return getXAttrsFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1; */ public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder> getXAttrsBuilderList() { return getXAttrsFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder> getXAttrsFieldBuilder() { if (xAttrsBuilder_ == null) { xAttrsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder>( xAttrs_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); xAttrs_ = null; } return xAttrsBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<XAttrFeatureProto> PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<XAttrFeatureProto>() { @java.lang.Override public XAttrFeatureProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new XAttrFeatureProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser<XAttrFeatureProto> parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser<XAttrFeatureProto> getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface INodeFileOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.INodeFile) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint32
replication = 1; */ boolean hasReplication(); /** * optional uint32 replication = 1; */ int getReplication(); /** * optional uint64 modificationTime = 2; */ boolean hasModificationTime(); /** * optional uint64 modificationTime = 2; */ long getModificationTime(); /** * optional uint64 accessTime = 3; */ boolean hasAccessTime(); /** * optional uint64 accessTime = 3; */ long getAccessTime(); /** * optional uint64 preferredBlockSize = 4; */ boolean hasPreferredBlockSize(); /** * optional uint64 preferredBlockSize = 4; */ long getPreferredBlockSize(); /** * optional fixed64 permission = 5; */ boolean hasPermission(); /** * optional fixed64 permission = 5; */ long getPermission(); /** * repeated .hadoop.hdfs.BlockProto blocks = 6; */ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList(); /** * repeated .hadoop.hdfs.BlockProto blocks = 6; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index); /** * repeated .hadoop.hdfs.BlockProto blocks = 6; */ int getBlocksCount(); /** * repeated .hadoop.hdfs.BlockProto blocks = 6; */ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> getBlocksOrBuilderList(); /** * repeated .hadoop.hdfs.BlockProto blocks = 6; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder( int index); /** * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7; */ boolean hasFileUC(); /** * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getFileUC(); /** * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder getFileUCOrBuilder(); /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ boolean hasAcl(); /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl(); /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder(); /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ boolean hasXAttrs(); /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs(); /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder(); /** * optional uint32 storagePolicyID = 10; */ boolean hasStoragePolicyID(); /** * optional uint32 storagePolicyID = 10; */ int getStoragePolicyID(); /** * optional .hadoop.hdfs.BlockTypeProto blockType = 11; */ boolean hasBlockType(); /** * optional .hadoop.hdfs.BlockTypeProto blockType = 11; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto getBlockType(); /** * optional uint32 erasureCodingPolicyID = 12; */ boolean hasErasureCodingPolicyID(); /** * optional uint32 erasureCodingPolicyID = 12; */ int getErasureCodingPolicyID(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeFile} */ public static final class INodeFile extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.INodeFile) INodeFileOrBuilder { private
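/*
 * Editor's example (not generated code): every optional field declared in
 * INodeFileOrBuilder above pairs a hazzer with a getter; the getter returns the
 * proto2 default when the field is unset, so callers should consult the hazzer
 * first. Sketch, with `file` a hypothetical INodeFileOrBuilder:
 *
 *   long mtime = file.hasModificationTime() ? file.getModificationTime() : -1L;
 */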
static final long serialVersionUID = 0L; // Use INodeFile.newBuilder() to construct. private INodeFile(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private INodeFile() { blocks_ = java.util.Collections.emptyList(); blockType_ = 0; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private INodeFile( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; replication_ = input.readUInt32(); break; } case 16: { bitField0_ |= 0x00000002; modificationTime_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; accessTime_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; preferredBlockSize_ = input.readUInt64(); break; } case 41: { bitField0_ |= 0x00000010; permission_ = input.readFixed64(); break; } case 50: { if (!((mutable_bitField0_ & 0x00000020) != 0)) { blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>(); mutable_bitField0_ |= 0x00000020; } blocks_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry)); break; } case 58: { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder subBuilder = null; if (((bitField0_ & 0x00000020) != 0)) { subBuilder = fileUC_.toBuilder(); } fileUC_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(fileUC_); fileUC_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000020; break; } case 66: { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder subBuilder = null; if (((bitField0_ & 0x00000040) != 0)) { subBuilder = acl_.toBuilder(); } acl_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(acl_); acl_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000040; break; } case 74: { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder subBuilder = null; if (((bitField0_ & 0x00000080) != 0)) { subBuilder = xAttrs_.toBuilder(); } xAttrs_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(xAttrs_); xAttrs_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000080; break; } case 80: { bitField0_ |= 0x00000100; storagePolicyID_ = input.readUInt32(); break; } case 88: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(11, rawValue); } else { bitField0_ |= 0x00000200; blockType_ =
rawValue; } break; } case 96: { bitField0_ |= 0x00000400; erasureCodingPolicyID_ = input.readUInt32(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000020) != 0)) { blocks_ = java.util.Collections.unmodifiableList(blocks_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder.class); } private int bitField0_; public static final int REPLICATION_FIELD_NUMBER = 1; private int replication_; /** * optional uint32 replication = 1; */ public boolean hasReplication() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 replication = 1; */ public int getReplication() { return replication_; } public static final int MODIFICATIONTIME_FIELD_NUMBER = 2; private long modificationTime_; /** * optional uint64 modificationTime = 2; */ public boolean hasModificationTime() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 modificationTime = 2; */ public long getModificationTime() { return modificationTime_; } public static final int ACCESSTIME_FIELD_NUMBER = 3; private long accessTime_; /** * optional uint64 accessTime = 3; */ public boolean hasAccessTime() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 accessTime = 3; */ public long getAccessTime() { return accessTime_; } public static final int PREFERREDBLOCKSIZE_FIELD_NUMBER = 4; private long preferredBlockSize_; /** * optional uint64 preferredBlockSize = 4; */ public boolean hasPreferredBlockSize() { return ((bitField0_ & 0x00000008) != 0); } /** * optional uint64 preferredBlockSize = 4; */ public long getPreferredBlockSize() { return preferredBlockSize_; } public static final int PERMISSION_FIELD_NUMBER = 5; private long permission_; /** * optional fixed64 permission = 5; */ public boolean hasPermission() { return ((bitField0_ & 0x00000010) != 0); } /** * optional fixed64 permission = 5; */ public long getPermission() { return permission_; } public static final int BLOCKS_FIELD_NUMBER = 6; private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_; /** * repeated .hadoop.hdfs.BlockProto blocks = 6; */ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() { return blocks_; } /** * repeated .hadoop.hdfs.BlockProto blocks = 6; */ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> getBlocksOrBuilderList() { return blocks_; } /** * repeated .hadoop.hdfs.BlockProto blocks = 6; */ public int getBlocksCount() { return blocks_.size(); } /** * repeated .hadoop.hdfs.BlockProto blocks = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) { return
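/*
 * Editor's example (not generated code): presence of each optional field is
 * tracked in bitField0_ (hasReplication() tests 0x1, hasModificationTime()
 * tests 0x2, and so on), which is why an unset field still has a well-defined
 * getter value but a false hazzer:
 *
 *   org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile f =
 *       org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder()
 *           .setReplication(3).build();
 *   // f.hasReplication() == true; f.hasAccessTime() == false; f.getAccessTime() == 0L
 */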
blocks_.get(index); } /** * repeated .hadoop.hdfs.BlockProto blocks = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder( int index) { return blocks_.get(index); } public static final int FILEUC_FIELD_NUMBER = 7; private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature fileUC_; /** * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7; */ public boolean hasFileUC() { return ((bitField0_ & 0x00000020) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getFileUC() { return fileUC_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance() : fileUC_; } /** * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder getFileUCOrBuilder() { return fileUC_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance() : fileUC_; } public static final int ACL_FIELD_NUMBER = 8; private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto acl_; /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ public boolean hasAcl() { return ((bitField0_ & 0x00000040) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl() { return acl_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_; } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder() { return acl_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_; } public static final int XATTRS_FIELD_NUMBER = 9; private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto xAttrs_; /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ public boolean hasXAttrs() { return ((bitField0_ & 0x00000080) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs() { return xAttrs_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_; } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder() { return xAttrs_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_; } public static final int STORAGEPOLICYID_FIELD_NUMBER = 10; private int storagePolicyID_; /** * optional uint32 storagePolicyID = 10; */ public boolean hasStoragePolicyID() { return ((bitField0_ & 0x00000100) != 0); } /** * optional uint32 storagePolicyID = 10; */ public int getStoragePolicyID() { return storagePolicyID_; } public static final int BLOCKTYPE_FIELD_NUMBER = 11; private int blockType_; /** * optional .hadoop.hdfs.BlockTypeProto blockType = 11; */ public boolean hasBlockType() { return ((bitField0_ & 0x00000200) != 0); } /** * optional .hadoop.hdfs.BlockTypeProto blockType = 11; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto getBlockType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto.valueOf(blockType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto.CONTIGUOUS : result; } public static final int ERASURECODINGPOLICYID_FIELD_NUMBER = 12; private int erasureCodingPolicyID_; /** * optional uint32 erasureCodingPolicyID = 12; */ public boolean hasErasureCodingPolicyID() { return ((bitField0_ & 0x00000400) != 0); } /** * optional uint32 erasureCodingPolicyID = 12; */ public int getErasureCodingPolicyID() { return erasureCodingPolicyID_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasXAttrs()) { if (!getXAttrs().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, replication_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, modificationTime_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, accessTime_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, preferredBlockSize_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeFixed64(5, permission_); } for (int i = 0; i < blocks_.size(); i++) { output.writeMessage(6, blocks_.get(i)); } if (((bitField0_ & 0x00000020) != 0)) { output.writeMessage(7, getFileUC()); } if (((bitField0_ & 0x00000040) != 0)) { output.writeMessage(8, getAcl()); } if (((bitField0_ & 0x00000080) != 0)) { output.writeMessage(9, getXAttrs()); } if (((bitField0_ & 0x00000100) != 0)) { output.writeUInt32(10, storagePolicyID_); } if (((bitField0_ & 0x00000200) != 0)) { output.writeEnum(11, blockType_); } if (((bitField0_ & 0x00000400) != 0)) { output.writeUInt32(12, erasureCodingPolicyID_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, replication_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, modificationTime_); } if (((bitField0_ & 0x00000004) != 0)) { 
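/*
 * Editor's example (not generated code): writeTo() and getSerializedSize()
 * above cooperate in the usual protobuf way, with the size memoized on first
 * use. Manual serialization sketch, `inodeFile` hypothetical:
 *
 *   java.io.ByteArrayOutputStream bos = new java.io.ByteArrayOutputStream();
 *   org.apache.hadoop.thirdparty.protobuf.CodedOutputStream cos =
 *       org.apache.hadoop.thirdparty.protobuf.CodedOutputStream.newInstance(bos);
 *   inodeFile.writeTo(cos);
 *   cos.flush();
 *   // bos.size() == inodeFile.getSerializedSize()
 */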
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, accessTime_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, preferredBlockSize_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeFixed64Size(5, permission_); } for (int i = 0; i < blocks_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(6, blocks_.get(i)); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(7, getFileUC()); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(8, getAcl()); } if (((bitField0_ & 0x00000080) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(9, getXAttrs()); } if (((bitField0_ & 0x00000100) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(10, storagePolicyID_); } if (((bitField0_ & 0x00000200) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(11, blockType_); } if (((bitField0_ & 0x00000400) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(12, erasureCodingPolicyID_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile) obj; if (hasReplication() != other.hasReplication()) return false; if (hasReplication()) { if (getReplication() != other.getReplication()) return false; } if (hasModificationTime() != other.hasModificationTime()) return false; if (hasModificationTime()) { if (getModificationTime() != other.getModificationTime()) return false; } if (hasAccessTime() != other.hasAccessTime()) return false; if (hasAccessTime()) { if (getAccessTime() != other.getAccessTime()) return false; } if (hasPreferredBlockSize() != other.hasPreferredBlockSize()) return false; if (hasPreferredBlockSize()) { if (getPreferredBlockSize() != other.getPreferredBlockSize()) return false; } if (hasPermission() != other.hasPermission()) return false; if (hasPermission()) { if (getPermission() != other.getPermission()) return false; } if (!getBlocksList() .equals(other.getBlocksList())) return false; if (hasFileUC() != other.hasFileUC()) return false; if (hasFileUC()) { if (!getFileUC() .equals(other.getFileUC())) return false; } if (hasAcl() != other.hasAcl()) return false; if (hasAcl()) { if (!getAcl() .equals(other.getAcl())) return false; } if (hasXAttrs() != other.hasXAttrs()) return false; if (hasXAttrs()) { if (!getXAttrs() .equals(other.getXAttrs())) return false; } if (hasStoragePolicyID() != other.hasStoragePolicyID()) return false; if (hasStoragePolicyID()) { if (getStoragePolicyID() != other.getStoragePolicyID()) return false; } if (hasBlockType() != other.hasBlockType()) return false; if (hasBlockType()) { if (blockType_ != other.blockType_) return false; } if (hasErasureCodingPolicyID() != other.hasErasureCodingPolicyID()) return false; if (hasErasureCodingPolicyID()) 
{ if (getErasureCodingPolicyID() != other.getErasureCodingPolicyID()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasReplication()) { hash = (37 * hash) + REPLICATION_FIELD_NUMBER; hash = (53 * hash) + getReplication(); } if (hasModificationTime()) { hash = (37 * hash) + MODIFICATIONTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getModificationTime()); } if (hasAccessTime()) { hash = (37 * hash) + ACCESSTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getAccessTime()); } if (hasPreferredBlockSize()) { hash = (37 * hash) + PREFERREDBLOCKSIZE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getPreferredBlockSize()); } if (hasPermission()) { hash = (37 * hash) + PERMISSION_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getPermission()); } if (getBlocksCount() > 0) { hash = (37 * hash) + BLOCKS_FIELD_NUMBER; hash = (53 * hash) + getBlocksList().hashCode(); } if (hasFileUC()) { hash = (37 * hash) + FILEUC_FIELD_NUMBER; hash = (53 * hash) + getFileUC().hashCode(); } if (hasAcl()) { hash = (37 * hash) + ACL_FIELD_NUMBER; hash = (53 * hash) + getAcl().hashCode(); } if (hasXAttrs()) { hash = (37 * hash) + XATTRS_FIELD_NUMBER; hash = (53 * hash) + getXAttrs().hashCode(); } if (hasStoragePolicyID()) { hash = (37 * hash) + STORAGEPOLICYID_FIELD_NUMBER; hash = (53 * hash) + getStoragePolicyID(); } if (hasBlockType()) { hash = (37 * hash) + BLOCKTYPE_FIELD_NUMBER; hash = (53 * hash) + blockType_; } if (hasErasureCodingPolicyID()) { hash = (37 * hash) + ERASURECODINGPOLICYID_FIELD_NUMBER; hash = (53 * hash) + getErasureCodingPolicyID(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile 
parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
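/*
 * Editor's example (not generated code): parseDelimitedFrom() above pairs with
 * MessageLite.writeDelimitedTo(), which length-prefixes each record, so several
 * INodeFile messages can share a single stream. Sketch, stream names hypothetical:
 *
 *   inodeFile.writeDelimitedTo(out); // java.io.OutputStream
 *   org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile back =
 *       org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.parseDelimitedFrom(in);
 */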
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeFile} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.INodeFile) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getBlocksFieldBuilder(); getFileUCFieldBuilder(); getAclFieldBuilder(); getXAttrsFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); replication_ = 0; bitField0_ = (bitField0_ & ~0x00000001); modificationTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); accessTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); preferredBlockSize_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); permission_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); if (blocksBuilder_ == null) { blocks_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000020); } else { blocksBuilder_.clear(); } if (fileUCBuilder_ == null) { fileUC_ = null; } else { fileUCBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); if (aclBuilder_ == null) { acl_ = null; } else { aclBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000080); if (xAttrsBuilder_ == null) { xAttrs_ = null; } else { xAttrsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000100); storagePolicyID_ = 0; bitField0_ = (bitField0_ & ~0x00000200); blockType_ = 0; bitField0_ = (bitField0_ & ~0x00000400); erasureCodingPolicyID_ = 0; bitField0_ = (bitField0_ & ~0x00000800); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile
build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.replication_ = replication_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.modificationTime_ = modificationTime_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.accessTime_ = accessTime_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.preferredBlockSize_ = preferredBlockSize_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { result.permission_ = permission_; to_bitField0_ |= 0x00000010; } if (blocksBuilder_ == null) { if (((bitField0_ & 0x00000020) != 0)) { blocks_ = java.util.Collections.unmodifiableList(blocks_); bitField0_ = (bitField0_ & ~0x00000020); } result.blocks_ = blocks_; } else { result.blocks_ = blocksBuilder_.build(); } if (((from_bitField0_ & 0x00000040) != 0)) { if (fileUCBuilder_ == null) { result.fileUC_ = fileUC_; } else { result.fileUC_ = fileUCBuilder_.build(); } to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000080) != 0)) { if (aclBuilder_ == null) { result.acl_ = acl_; } else { result.acl_ = aclBuilder_.build(); } to_bitField0_ |= 0x00000040; } if (((from_bitField0_ & 0x00000100) != 0)) { if (xAttrsBuilder_ == null) { result.xAttrs_ = xAttrs_; } else { result.xAttrs_ = xAttrsBuilder_.build(); } to_bitField0_ |= 0x00000080; } if (((from_bitField0_ & 0x00000200) != 0)) { result.storagePolicyID_ = storagePolicyID_; to_bitField0_ |= 0x00000100; } if (((from_bitField0_ & 0x00000400) != 0)) { to_bitField0_ |= 0x00000200; } result.blockType_ = blockType_; if (((from_bitField0_ & 0x00000800) != 0)) { result.erasureCodingPolicyID_ = erasureCodingPolicyID_; to_bitField0_ |= 0x00000400; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile) { return 
mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) return this; if (other.hasReplication()) { setReplication(other.getReplication()); } if (other.hasModificationTime()) { setModificationTime(other.getModificationTime()); } if (other.hasAccessTime()) { setAccessTime(other.getAccessTime()); } if (other.hasPreferredBlockSize()) { setPreferredBlockSize(other.getPreferredBlockSize()); } if (other.hasPermission()) { setPermission(other.getPermission()); } if (blocksBuilder_ == null) { if (!other.blocks_.isEmpty()) { if (blocks_.isEmpty()) { blocks_ = other.blocks_; bitField0_ = (bitField0_ & ~0x00000020); } else { ensureBlocksIsMutable(); blocks_.addAll(other.blocks_); } onChanged(); } } else { if (!other.blocks_.isEmpty()) { if (blocksBuilder_.isEmpty()) { blocksBuilder_.dispose(); blocksBuilder_ = null; blocks_ = other.blocks_; bitField0_ = (bitField0_ & ~0x00000020); blocksBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getBlocksFieldBuilder() : null; } else { blocksBuilder_.addAllMessages(other.blocks_); } } } if (other.hasFileUC()) { mergeFileUC(other.getFileUC()); } if (other.hasAcl()) { mergeAcl(other.getAcl()); } if (other.hasXAttrs()) { mergeXAttrs(other.getXAttrs()); } if (other.hasStoragePolicyID()) { setStoragePolicyID(other.getStoragePolicyID()); } if (other.hasBlockType()) { setBlockType(other.getBlockType()); } if (other.hasErasureCodingPolicyID()) { setErasureCodingPolicyID(other.getErasureCodingPolicyID()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { return false; } } if (hasXAttrs()) { if (!getXAttrs().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int replication_ ; /** * optional uint32 replication = 1; */ public boolean hasReplication() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 replication = 1; */ public int getReplication() { return replication_; } /** * optional uint32 replication = 1; */ public Builder setReplication(int value) { bitField0_ |= 0x00000001; replication_ = value; onChanged(); return this; } /** * optional uint32 replication = 1; */ public Builder clearReplication() { bitField0_ = (bitField0_ & ~0x00000001); replication_ = 0; onChanged(); return this; } private long modificationTime_ ; /** * optional uint64 modificationTime = 2; */ public boolean hasModificationTime() { return ((bitField0_ 
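/*
 * Editor's example (not generated code): the scalar setters below chain, so a
 * minimal record can be assembled in one expression (values illustrative only):
 *
 *   org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile f =
 *       org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder()
 *           .setReplication(3)
 *           .setModificationTime(System.currentTimeMillis())
 *           .setPreferredBlockSize(128L << 20) // 128 MiB
 *           .build();
 */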
& 0x00000002) != 0);
      }
      /**
       * optional uint64 modificationTime = 2;
       */
      public long getModificationTime() {
        return modificationTime_;
      }
      /**
       * optional uint64 modificationTime = 2;
       */
      public Builder setModificationTime(long value) {
        bitField0_ |= 0x00000002;
        modificationTime_ = value;
        onChanged();
        return this;
      }
      /**
       * optional uint64 modificationTime = 2;
       */
      public Builder clearModificationTime() {
        bitField0_ = (bitField0_ & ~0x00000002);
        modificationTime_ = 0L;
        onChanged();
        return this;
      }

      private long accessTime_ ;
      /**
       * optional uint64 accessTime = 3;
       */
      public boolean hasAccessTime() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * optional uint64 accessTime = 3;
       */
      public long getAccessTime() {
        return accessTime_;
      }
      /**
       * optional uint64 accessTime = 3;
       */
      public Builder setAccessTime(long value) {
        bitField0_ |= 0x00000004;
        accessTime_ = value;
        onChanged();
        return this;
      }
      /**
       * optional uint64 accessTime = 3;
       */
      public Builder clearAccessTime() {
        bitField0_ = (bitField0_ & ~0x00000004);
        accessTime_ = 0L;
        onChanged();
        return this;
      }

      private long preferredBlockSize_ ;
      /**
       * optional uint64 preferredBlockSize = 4;
       */
      public boolean hasPreferredBlockSize() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * optional uint64 preferredBlockSize = 4;
       */
      public long getPreferredBlockSize() {
        return preferredBlockSize_;
      }
      /**
       * optional uint64 preferredBlockSize = 4;
       */
      public Builder setPreferredBlockSize(long value) {
        bitField0_ |= 0x00000008;
        preferredBlockSize_ = value;
        onChanged();
        return this;
      }
      /**
       * optional uint64 preferredBlockSize = 4;
       */
      public Builder clearPreferredBlockSize() {
        bitField0_ = (bitField0_ & ~0x00000008);
        preferredBlockSize_ = 0L;
        onChanged();
        return this;
      }

      private long permission_ ;
      /**
       * optional fixed64 permission = 5;
       */
      public boolean hasPermission() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * optional fixed64 permission = 5;
       */
      public long getPermission() {
        return permission_;
      }
      /**
       * optional fixed64 permission = 5;
       */
      public Builder setPermission(long value) {
        bitField0_ |= 0x00000010;
        permission_ = value;
        onChanged();
        return this;
      }
      /**
       * optional fixed64 permission = 5;
       */
      public Builder clearPermission() {
        bitField0_ = (bitField0_ & ~0x00000010);
        permission_ = 0L;
        onChanged();
        return this;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_ =
        java.util.Collections.emptyList();
      private void ensureBlocksIsMutable() {
        if (!((bitField0_ & 0x00000020) != 0)) {
          blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>(blocks_);
          bitField0_ |= 0x00000020;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blocksBuilder_;

      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
        if (blocksBuilder_ == null) {
          return java.util.Collections.unmodifiableList(blocks_);
        } else {
          return blocksBuilder_.getMessageList();
        }
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public int getBlocksCount() {
        if (blocksBuilder_ == null) {
          return blocks_.size();
        } else {
          return blocksBuilder_.getCount();
        }
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessage(index);
        }
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.set(index, value);
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.set(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(index, value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public Builder addBlocks(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public Builder addAllBlocks(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> values) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, blocks_);
          onChanged();
        } else {
          blocksBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public Builder clearBlocks() {
        if (blocksBuilder_ == null) {
          blocks_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000020);
          onChanged();
        } else {
          blocksBuilder_.clear();
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public Builder removeBlocks(int index) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.remove(index);
          onChanged();
        } else {
          blocksBuilder_.remove(index);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().getBuilder(index);
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
          int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
           getBlocksOrBuilderList() {
        if (blocksBuilder_ != null) {
          return blocksBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(blocks_);
        }
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder() {
        return getBlocksFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
      }
      /**
       * repeated .hadoop.hdfs.BlockProto blocks = 6;
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder>
           getBlocksBuilderList() {
        return getBlocksFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
          getBlocksFieldBuilder() {
        if (blocksBuilder_ == null) {
          blocksBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
                  blocks_,
                  ((bitField0_ & 0x00000020) != 0),
                  getParentForChildren(),
                  isClean());
          blocks_ = null;
        }
        return blocksBuilder_;
      }

      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature fileUC_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder> fileUCBuilder_;
      /**
       * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;
       */
      public boolean hasFileUC() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getFileUC() {
        if (fileUCBuilder_ == null) {
          return fileUC_ == null ?
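/*
 * Usage sketch (illustrative only, not part of the generated code): the
 * builder methods above are typically chained. Assuming a
 * HdfsProtos.BlockProto instance `block` is already available, an INodeFile
 * message could be assembled like this:
 *
 *   org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile file =
 *       org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder()
 *           .setReplication(3)
 *           .setModificationTime(System.currentTimeMillis())
 *           .setPreferredBlockSize(128L * 1024 * 1024)
 *           .addBlocks(block)
 *           .build();
 */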
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance() : fileUC_; } else { return fileUCBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7; */ public Builder setFileUC(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature value) { if (fileUCBuilder_ == null) { if (value == null) { throw new NullPointerException(); } fileUC_ = value; onChanged(); } else { fileUCBuilder_.setMessage(value); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7; */ public Builder setFileUC( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder builderForValue) { if (fileUCBuilder_ == null) { fileUC_ = builderForValue.build(); onChanged(); } else { fileUCBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7; */ public Builder mergeFileUC(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature value) { if (fileUCBuilder_ == null) { if (((bitField0_ & 0x00000040) != 0) && fileUC_ != null && fileUC_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance()) { fileUC_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.newBuilder(fileUC_).mergeFrom(value).buildPartial(); } else { fileUC_ = value; } onChanged(); } else { fileUCBuilder_.mergeFrom(value); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7; */ public Builder clearFileUC() { if (fileUCBuilder_ == null) { fileUC_ = null; onChanged(); } else { fileUCBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder getFileUCBuilder() { bitField0_ |= 0x00000040; onChanged(); return getFileUCFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder getFileUCOrBuilder() { if (fileUCBuilder_ != null) { return fileUCBuilder_.getMessageOrBuilder(); } else { return fileUC_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance() : fileUC_; } } /** * optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder> getFileUCFieldBuilder() { if (fileUCBuilder_ == null) { fileUCBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder>( getFileUC(), getParentForChildren(), isClean()); fileUC_ = null; } return fileUCBuilder_; } private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto acl_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder> aclBuilder_; /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ public boolean hasAcl() { return ((bitField0_ & 0x00000080) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl() { if (aclBuilder_ == null) { return acl_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_; } else { return aclBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ public Builder setAcl(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto value) { if (aclBuilder_ == null) { if (value == null) { throw new NullPointerException(); } acl_ = value; onChanged(); } else { aclBuilder_.setMessage(value); } bitField0_ |= 0x00000080; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ public Builder setAcl( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder builderForValue) { if (aclBuilder_ == null) { acl_ = builderForValue.build(); onChanged(); } else { aclBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000080; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ public Builder mergeAcl(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto value) { if (aclBuilder_ == null) { if (((bitField0_ & 0x00000080) != 0) && acl_ != null && acl_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance()) { acl_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.newBuilder(acl_).mergeFrom(value).buildPartial(); } else { acl_ = value; } onChanged(); } else { aclBuilder_.mergeFrom(value); } bitField0_ |= 0x00000080; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ public Builder clearAcl() { if (aclBuilder_ == null) { acl_ = null; onChanged(); } else { aclBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000080); return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder getAclBuilder() { bitField0_ |= 0x00000080; onChanged(); return getAclFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder() { if (aclBuilder_ != null) { return aclBuilder_.getMessageOrBuilder(); } else { return acl_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_; } } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder> getAclFieldBuilder() { if (aclBuilder_ == null) { aclBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder>( getAcl(), getParentForChildren(), isClean()); acl_ = null; } return aclBuilder_; } private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto xAttrs_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder> xAttrsBuilder_; /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ public boolean hasXAttrs() { return ((bitField0_ & 0x00000100) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs() { if (xAttrsBuilder_ == null) { return xAttrs_ == null ? 
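/*
 * Note (illustrative): for singular message fields such as xAttrs, the
 * generated merge*() method below combines an existing value with the
 * incoming one via newBuilder(existing).mergeFrom(value).buildPartial(),
 * whereas set*() replaces the field outright.
 */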
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_; } else { return xAttrsBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ public Builder setXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto value) { if (xAttrsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } xAttrs_ = value; onChanged(); } else { xAttrsBuilder_.setMessage(value); } bitField0_ |= 0x00000100; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ public Builder setXAttrs( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder builderForValue) { if (xAttrsBuilder_ == null) { xAttrs_ = builderForValue.build(); onChanged(); } else { xAttrsBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000100; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ public Builder mergeXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto value) { if (xAttrsBuilder_ == null) { if (((bitField0_ & 0x00000100) != 0) && xAttrs_ != null && xAttrs_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance()) { xAttrs_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.newBuilder(xAttrs_).mergeFrom(value).buildPartial(); } else { xAttrs_ = value; } onChanged(); } else { xAttrsBuilder_.mergeFrom(value); } bitField0_ |= 0x00000100; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ public Builder clearXAttrs() { if (xAttrsBuilder_ == null) { xAttrs_ = null; onChanged(); } else { xAttrsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000100); return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder getXAttrsBuilder() { bitField0_ |= 0x00000100; onChanged(); return getXAttrsFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder() { if (xAttrsBuilder_ != null) { return xAttrsBuilder_.getMessageOrBuilder(); } else { return xAttrs_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_; } } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder> getXAttrsFieldBuilder() { if (xAttrsBuilder_ == null) { xAttrsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder>( getXAttrs(), getParentForChildren(), isClean()); xAttrs_ = null; } return xAttrsBuilder_; } private int storagePolicyID_ ; /** * optional uint32 storagePolicyID = 10; */ public boolean hasStoragePolicyID() { return ((bitField0_ & 0x00000200) != 0); } /** * optional uint32 storagePolicyID = 10; */ public int getStoragePolicyID() { return storagePolicyID_; } /** * optional uint32 storagePolicyID = 10; */ public Builder setStoragePolicyID(int value) { bitField0_ |= 0x00000200; storagePolicyID_ = value; onChanged(); return this; } /** * optional uint32 storagePolicyID = 10; */ public Builder clearStoragePolicyID() { bitField0_ = (bitField0_ & ~0x00000200); storagePolicyID_ = 0; onChanged(); return this; } private int blockType_ = 0; /** * optional .hadoop.hdfs.BlockTypeProto blockType = 11; */ public boolean hasBlockType() { return ((bitField0_ & 0x00000400) != 0); } /** * optional .hadoop.hdfs.BlockTypeProto blockType = 11; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto getBlockType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto.valueOf(blockType_); return result == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto.CONTIGUOUS : result;
      }
      /**
       * optional .hadoop.hdfs.BlockTypeProto blockType = 11;
       */
      public Builder setBlockType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000400;
        blockType_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * optional .hadoop.hdfs.BlockTypeProto blockType = 11;
       */
      public Builder clearBlockType() {
        bitField0_ = (bitField0_ & ~0x00000400);
        blockType_ = 0;
        onChanged();
        return this;
      }

      private int erasureCodingPolicyID_ ;
      /**
       * optional uint32 erasureCodingPolicyID = 12;
       */
      public boolean hasErasureCodingPolicyID() {
        return ((bitField0_ & 0x00000800) != 0);
      }
      /**
       * optional uint32 erasureCodingPolicyID = 12;
       */
      public int getErasureCodingPolicyID() {
        return erasureCodingPolicyID_;
      }
      /**
       * optional uint32 erasureCodingPolicyID = 12;
       */
      public Builder setErasureCodingPolicyID(int value) {
        bitField0_ |= 0x00000800;
        erasureCodingPolicyID_ = value;
        onChanged();
        return this;
      }
      /**
       * optional uint32 erasureCodingPolicyID = 12;
       */
      public Builder clearErasureCodingPolicyID() {
        bitField0_ = (bitField0_ & ~0x00000800);
        erasureCodingPolicyID_ = 0;
        onChanged();
        return this;
      }

      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }
      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.INodeFile)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.INodeFile)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<INodeFile>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<INodeFile>() {
      @java.lang.Override
      public INodeFile parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return new INodeFile(input, extensionRegistry);
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<INodeFile> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<INodeFile> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface QuotaByStorageTypeEntryProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * required .hadoop.hdfs.StorageTypeProto storageType = 1;
     */
    boolean hasStorageType();
    /**
     * required .hadoop.hdfs.StorageTypeProto storageType = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();
    /**
     * required uint64 quota = 2;
     */
    boolean hasQuota();
    /**
     *
required uint64 quota = 2; */ long getQuota(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto} */ public static final class QuotaByStorageTypeEntryProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto) QuotaByStorageTypeEntryProtoOrBuilder { private static final long serialVersionUID = 0L; // Use QuotaByStorageTypeEntryProto.newBuilder() to construct. private QuotaByStorageTypeEntryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private QuotaByStorageTypeEntryProto() { storageType_ = 1; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private QuotaByStorageTypeEntryProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; storageType_ = rawValue; } break; } case 16: { bitField0_ |= 0x00000002; quota_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder.class); } private int bitField0_; public static final int STORAGETYPE_FIELD_NUMBER = 1; private int storageType_; /** * required .hadoop.hdfs.StorageTypeProto storageType = 1; */ public boolean hasStorageType() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.StorageTypeProto storageType = 1; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(storageType_); return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result; } public static final int QUOTA_FIELD_NUMBER = 2; private long quota_; /** * required uint64 quota = 2; */ public boolean hasQuota() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 quota = 2; */ public long getQuota() { return quota_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasStorageType()) { memoizedIsInitialized = 0; return false; } if (!hasQuota()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, storageType_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, quota_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, storageType_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, quota_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto) obj; if (hasStorageType() != other.hasStorageType()) return false; if (hasStorageType()) { if (storageType_ != other.storageType_) return false; } if (hasQuota() != other.hasQuota()) return false; if (hasQuota()) { if (getQuota() != other.getQuota()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasStorageType()) { hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER; hash = (53 * hash) + storageType_; } if (hasQuota()) { hash = (37 * hash) + QUOTA_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getQuota()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom( java.nio.ByteBuffer data, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override 
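/*
 * Usage sketch (illustrative only, not part of the generated code):
 * round-tripping an entry through the parse methods declared above. Both
 * required fields must be set before build() succeeds:
 *
 *   FsImageProto.INodeSection.QuotaByStorageTypeEntryProto entry =
 *       FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.newBuilder()
 *           .setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK)
 *           .setQuota(1024L * 1024L * 1024L)
 *           .build();
 *   byte[] data = entry.toByteArray();
 *   FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parsed =
 *       FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.parseFrom(data);
 */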
public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); storageType_ = 1; bitField0_ = (bitField0_ & ~0x00000001); quota_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto 
buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.storageType_ = storageType_; if (((from_bitField0_ & 0x00000002) != 0)) { result.quota_ = quota_; to_bitField0_ |= 0x00000002; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.getDefaultInstance()) return this; if (other.hasStorageType()) { setStorageType(other.getStorageType()); } if (other.hasQuota()) { setQuota(other.getQuota()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasStorageType()) { return false; } if (!hasQuota()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int storageType_ = 1; /** * required .hadoop.hdfs.StorageTypeProto storageType = 1; */ public boolean hasStorageType() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.StorageTypeProto storageType = 1; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
        @SuppressWarnings("deprecation")
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(storageType_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
      }
      /**
       * required .hadoop.hdfs.StorageTypeProto storageType = 1;
       */
      public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        storageType_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * required .hadoop.hdfs.StorageTypeProto storageType = 1;
       */
      public Builder clearStorageType() {
        bitField0_ = (bitField0_ & ~0x00000001);
        storageType_ = 1;
        onChanged();
        return this;
      }

      private long quota_ ;
      /**
       * required uint64 quota = 2;
       */
      public boolean hasQuota() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * required uint64 quota = 2;
       */
      public long getQuota() {
        return quota_;
      }
      /**
       * required uint64 quota = 2;
       */
      public Builder setQuota(long value) {
        bitField0_ |= 0x00000002;
        quota_ = value;
        onChanged();
        return this;
      }
      /**
       * required uint64 quota = 2;
       */
      public Builder clearQuota() {
        bitField0_ = (bitField0_ & ~0x00000002);
        quota_ = 0L;
        onChanged();
        return this;
      }

      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }
      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<QuotaByStorageTypeEntryProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<QuotaByStorageTypeEntryProto>() {
      @java.lang.Override
      public QuotaByStorageTypeEntryProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return new QuotaByStorageTypeEntryProto(input, extensionRegistry);
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<QuotaByStorageTypeEntryProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<QuotaByStorageTypeEntryProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface QuotaByStorageTypeFeatureProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;
     */
    java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto>
        getQuotasList();
    /**
     * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;
     */
    org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getQuotas(int index);
    /**
     * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;
     */
    int getQuotasCount();
    /**
     * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;
     */
    java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder>
        getQuotasOrBuilderList();
    /**
     * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;
     */
    org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder getQuotasOrBuilder(
        int index);
  }
  /**
   * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto}
   */
  public static final class QuotaByStorageTypeFeatureProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto)
      QuotaByStorageTypeFeatureProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use QuotaByStorageTypeFeatureProto.newBuilder() to construct.
    private QuotaByStorageTypeFeatureProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private QuotaByStorageTypeFeatureProto() {
      quotas_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
    getUnknownFields() {
      return this.unknownFields;
    }
    private QuotaByStorageTypeFeatureProto(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      this();
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      int mutable_bitField0_ = 0;
      org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
          org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 10: {
              if (!((mutable_bitField0_ & 0x00000001) != 0)) {
                quotas_ = new java.util.ArrayList<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto>();
                mutable_bitField0_ |= 0x00000001;
              }
              quotas_.add(
                  input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.PARSER, extensionRegistry));
              break;
            }
            default: {
              if (!parseUnknownField(
                  input, unknownFields, extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
            e).setUnfinishedMessage(this);
      } finally {
        if (((mutable_bitField0_ & 0x00000001) != 0)) {
          quotas_ = java.util.Collections.unmodifiableList(quotas_);
        }
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder.class);
    }

    public static final int QUOTAS_FIELD_NUMBER = 1;
    private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> quotas_;
    /**
     * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;
     */
    public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> getQuotasList() {
      return quotas_;
    }
    /**
     * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;
     */
    public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder>
        getQuotasOrBuilderList() {
      return quotas_;
    }
    /**
     * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;
     */
    public int getQuotasCount() {
      return quotas_.size();
    }
    /**
     * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;
     */
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getQuotas(int index) {
      return quotas_.get(index);
    }
    /**
     * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;
     */
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder getQuotasOrBuilder(
        int index) {
      return quotas_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      for (int i = 0; i < getQuotasCount(); i++) {
        if (!getQuotas(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < quotas_.size(); i++) {
        output.writeMessage(1, quotas_.get(i));
      }
      unknownFields.writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < quotas_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, quotas_.get(i));
      }
      size += unknownFields.getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto) obj;

      if (!getQuotasList()
          .equals(other.getQuotasList())) return false;
      if (!unknownFields.equals(other.unknownFields)) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getQuotasCount() > 0) {
        hash = (37 * hash) + QUOTAS_FIELD_NUMBER;
        hash = (53 * hash) + getQuotasList().hashCode();
      }
      hash = (29 * hash) + unknownFields.hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
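/*
 * Usage sketch (illustrative only, not part of the generated code): reading
 * the repeated quotas field, assuming `feature` is a parsed
 * QuotaByStorageTypeFeatureProto instance:
 *
 *   for (FsImageProto.INodeSection.QuotaByStorageTypeEntryProto q : feature.getQuotasList()) {
 *     System.out.println(q.getStorageType() + " -> " + q.getQuota());
 *   }
 */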
java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getQuotasFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (quotasBuilder_ == null) { quotas_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { quotasBuilder_.clear(); } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance(); } @java.lang.Override public 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto(this); int from_bitField0_ = bitField0_; if (quotasBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { quotas_ = java.util.Collections.unmodifiableList(quotas_); bitField0_ = (bitField0_ & ~0x00000001); } result.quotas_ = quotas_; } else { result.quotas_ = quotasBuilder_.build(); } onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance()) return this; if (quotasBuilder_ == null) { if (!other.quotas_.isEmpty()) { if (quotas_.isEmpty()) { quotas_ = other.quotas_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureQuotasIsMutable(); quotas_.addAll(other.quotas_); } onChanged(); } } else { if (!other.quotas_.isEmpty()) { if (quotasBuilder_.isEmpty()) { quotasBuilder_.dispose(); quotasBuilder_ = null; quotas_ = other.quotas_; bitField0_ = (bitField0_ & ~0x00000001); quotasBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getQuotasFieldBuilder() : null; } else { quotasBuilder_.addAllMessages(other.quotas_); } } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { for (int i = 0; i < getQuotasCount(); i++) { if (!getQuotas(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.util.List quotas_ = java.util.Collections.emptyList(); private void ensureQuotasIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { quotas_ = new java.util.ArrayList(quotas_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder> quotasBuilder_; /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public java.util.List getQuotasList() { if (quotasBuilder_ == null) { return java.util.Collections.unmodifiableList(quotas_); } else { return quotasBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public int getQuotasCount() { if (quotasBuilder_ == null) { return quotas_.size(); } else { return quotasBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getQuotas(int index) { if (quotasBuilder_ == null) { return quotas_.get(index); } else { return quotasBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public Builder setQuotas( int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto value) { if (quotasBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureQuotasIsMutable(); quotas_.set(index, value); onChanged(); } else { quotasBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public Builder setQuotas( int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder builderForValue) { if (quotasBuilder_ == null) { ensureQuotasIsMutable(); quotas_.set(index, builderForValue.build()); onChanged(); } else { quotasBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public Builder 
addQuotas(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto value) { if (quotasBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureQuotasIsMutable(); quotas_.add(value); onChanged(); } else { quotasBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public Builder addQuotas( int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto value) { if (quotasBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureQuotasIsMutable(); quotas_.add(index, value); onChanged(); } else { quotasBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public Builder addQuotas( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder builderForValue) { if (quotasBuilder_ == null) { ensureQuotasIsMutable(); quotas_.add(builderForValue.build()); onChanged(); } else { quotasBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public Builder addQuotas( int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder builderForValue) { if (quotasBuilder_ == null) { ensureQuotasIsMutable(); quotas_.add(index, builderForValue.build()); onChanged(); } else { quotasBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public Builder addAllQuotas( java.lang.Iterable values) { if (quotasBuilder_ == null) { ensureQuotasIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, quotas_); onChanged(); } else { quotasBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public Builder clearQuotas() { if (quotasBuilder_ == null) { quotas_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { quotasBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public Builder removeQuotas(int index) { if (quotasBuilder_ == null) { ensureQuotasIsMutable(); quotas_.remove(index); onChanged(); } else { quotasBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder getQuotasBuilder( int index) { return getQuotasFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder getQuotasOrBuilder( int index) { if (quotasBuilder_ == null) { return quotas_.get(index); } else { return quotasBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public java.util.List getQuotasOrBuilderList() { if (quotasBuilder_ != null) { return quotasBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(quotas_); } } /** * repeated 
.hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder addQuotasBuilder() { return getQuotasFieldBuilder().addBuilder( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder addQuotasBuilder( int index) { return getQuotasFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1; */ public java.util.List getQuotasBuilderList() { return getQuotasFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder> getQuotasFieldBuilder() { if (quotasBuilder_ == null) { quotasBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder>( quotas_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); quotas_ = null; } return quotasBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public QuotaByStorageTypeFeatureProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new QuotaByStorageTypeFeatureProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public 
org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface INodeDirectoryOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.INodeDirectory) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint64 modificationTime = 1; */ boolean hasModificationTime(); /** * optional uint64 modificationTime = 1; */ long getModificationTime(); /** *
       * <pre>
       * namespace quota
       * </pre>
       *
       * optional uint64 nsQuota = 2;
       */
      boolean hasNsQuota();
      /**
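       * <p>Illustrative note, not part of the generated output: the field is
       * optional, so callers typically guard reads with the presence check,
       * e.g. {@code long ns = d.hasNsQuota() ? d.getNsQuota() : 0L;} for some
       * {@code INodeDirectory d}; an unset uint64 field reads back as 0.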
       * <pre>
       * namespace quota
       * </pre>
       *
       * optional uint64 nsQuota = 2;
       */
      long getNsQuota();
      /**
       * <pre>
       * diskspace quota
       * </pre>
       *
       * optional uint64 dsQuota = 3;
       */
      boolean hasDsQuota();
      /**
       * <pre>
       * diskspace quota
       * </pre>
* * optional uint64 dsQuota = 3; */ long getDsQuota(); /** * optional fixed64 permission = 4; */ boolean hasPermission(); /** * optional fixed64 permission = 4; */ long getPermission(); /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ boolean hasAcl(); /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl(); /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder(); /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ boolean hasXAttrs(); /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs(); /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder(); /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ boolean hasTypeQuotas(); /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getTypeQuotas(); /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder getTypeQuotasOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeDirectory} */ public static final class INodeDirectory extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.INodeDirectory) INodeDirectoryOrBuilder { private static final long serialVersionUID = 0L; // Use INodeDirectory.newBuilder() to construct. 
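      // Illustrative usage sketch, not part of the generated source; the
      // QuotaByStorageTypeEntryProto value `entry` is assumed to be built
      // elsewhere. All methods referenced here are generated in this file.
      //
      //   FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas =
      //       FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.newBuilder()
      //           .addQuotas(entry)                 // per-storage-type quota entry
      //           .build();
      //   FsImageProto.INodeSection.INodeDirectory dir =
      //       FsImageProto.INodeSection.INodeDirectory.newBuilder()
      //           .setModificationTime(System.currentTimeMillis())
      //           .setNsQuota(100000L)              // namespace quota (name count)
      //           .setDsQuota(1L << 40)             // diskspace quota (bytes)
      //           .setTypeQuotas(typeQuotas)
      //           .build();
      //   byte[] bytes = dir.toByteArray();         // standard protobuf serialization
      //   FsImageProto.INodeSection.INodeDirectory parsed =
      //       FsImageProto.INodeSection.INodeDirectory.parseFrom(bytes);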
private INodeDirectory(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private INodeDirectory() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private INodeDirectory( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; modificationTime_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; nsQuota_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; dsQuota_ = input.readUInt64(); break; } case 33: { bitField0_ |= 0x00000008; permission_ = input.readFixed64(); break; } case 42: { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder subBuilder = null; if (((bitField0_ & 0x00000010) != 0)) { subBuilder = acl_.toBuilder(); } acl_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(acl_); acl_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000010; break; } case 50: { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder subBuilder = null; if (((bitField0_ & 0x00000020) != 0)) { subBuilder = xAttrs_.toBuilder(); } xAttrs_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(xAttrs_); xAttrs_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000020; break; } case 58: { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder subBuilder = null; if (((bitField0_ & 0x00000040) != 0)) { subBuilder = typeQuotas_.toBuilder(); } typeQuotas_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(typeQuotas_); typeQuotas_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000040; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder.class); } private int bitField0_; public static final int MODIFICATIONTIME_FIELD_NUMBER = 1; private long modificationTime_; /** * optional uint64 modificationTime = 1; */ public boolean hasModificationTime() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 modificationTime = 1; */ public long getModificationTime() { return modificationTime_; } public static final int NSQUOTA_FIELD_NUMBER = 2; private long nsQuota_; /** *
       * <pre>
       * namespace quota
       * </pre>
       *
       * optional uint64 nsQuota = 2;
       */
      public boolean hasNsQuota() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * namespace quota
       * </pre>
       *
       * optional uint64 nsQuota = 2;
       */
      public long getNsQuota() {
        return nsQuota_;
      }
      public static final int DSQUOTA_FIELD_NUMBER = 3;
      private long dsQuota_;
      /**
       * <pre>
       * diskspace quota
       * </pre>
       *
       * optional uint64 dsQuota = 3;
       */
      public boolean hasDsQuota() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * diskspace quota
       * </pre>
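       * <p>Illustrative note: in HDFS this value is a space quota expressed in
       * bytes, and HDFS quota accounting charges the replicated size of blocks
       * against it.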
* * optional uint64 dsQuota = 3; */ public long getDsQuota() { return dsQuota_; } public static final int PERMISSION_FIELD_NUMBER = 4; private long permission_; /** * optional fixed64 permission = 4; */ public boolean hasPermission() { return ((bitField0_ & 0x00000008) != 0); } /** * optional fixed64 permission = 4; */ public long getPermission() { return permission_; } public static final int ACL_FIELD_NUMBER = 5; private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto acl_; /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ public boolean hasAcl() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl() { return acl_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_; } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder() { return acl_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_; } public static final int XATTRS_FIELD_NUMBER = 6; private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto xAttrs_; /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ public boolean hasXAttrs() { return ((bitField0_ & 0x00000020) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs() { return xAttrs_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_; } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder() { return xAttrs_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_; } public static final int TYPEQUOTAS_FIELD_NUMBER = 7; private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas_; /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ public boolean hasTypeQuotas() { return ((bitField0_ & 0x00000040) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getTypeQuotas() { return typeQuotas_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance() : typeQuotas_; } /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder getTypeQuotasOrBuilder() { return typeQuotas_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance() : typeQuotas_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (hasXAttrs()) { if (!getXAttrs().isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasTypeQuotas()) { if (!getTypeQuotas().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, modificationTime_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, nsQuota_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, dsQuota_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeFixed64(4, permission_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeMessage(5, getAcl()); } if (((bitField0_ & 0x00000020) != 0)) { output.writeMessage(6, getXAttrs()); } if (((bitField0_ & 0x00000040) != 0)) { output.writeMessage(7, getTypeQuotas()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, modificationTime_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, nsQuota_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, dsQuota_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeFixed64Size(4, permission_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(5, getAcl()); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(6, getXAttrs()); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(7, getTypeQuotas()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory) obj; if (hasModificationTime() != other.hasModificationTime()) return false; if (hasModificationTime()) { if (getModificationTime() != other.getModificationTime()) return false; } if (hasNsQuota() != other.hasNsQuota()) return false; if (hasNsQuota()) { if (getNsQuota() != other.getNsQuota()) return false; } if (hasDsQuota() != other.hasDsQuota()) return false; if (hasDsQuota()) { if (getDsQuota() != other.getDsQuota()) return false; } if (hasPermission() != other.hasPermission()) return false; if (hasPermission()) { if (getPermission() != other.getPermission()) return false; } if (hasAcl() != other.hasAcl()) return false; 
if (hasAcl()) { if (!getAcl() .equals(other.getAcl())) return false; } if (hasXAttrs() != other.hasXAttrs()) return false; if (hasXAttrs()) { if (!getXAttrs() .equals(other.getXAttrs())) return false; } if (hasTypeQuotas() != other.hasTypeQuotas()) return false; if (hasTypeQuotas()) { if (!getTypeQuotas() .equals(other.getTypeQuotas())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasModificationTime()) { hash = (37 * hash) + MODIFICATIONTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getModificationTime()); } if (hasNsQuota()) { hash = (37 * hash) + NSQUOTA_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNsQuota()); } if (hasDsQuota()) { hash = (37 * hash) + DSQUOTA_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getDsQuota()); } if (hasPermission()) { hash = (37 * hash) + PERMISSION_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getPermission()); } if (hasAcl()) { hash = (37 * hash) + ACL_FIELD_NUMBER; hash = (53 * hash) + getAcl().hashCode(); } if (hasXAttrs()) { hash = (37 * hash) + XATTRS_FIELD_NUMBER; hash = (53 * hash) + getXAttrs().hashCode(); } if (hasTypeQuotas()) { hash = (37 * hash) + TYPEQUOTAS_FIELD_NUMBER; hash = (53 * hash) + getTypeQuotas().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory 
parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeDirectory} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.INodeDirectory) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getAclFieldBuilder(); getXAttrsFieldBuilder(); getTypeQuotasFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); modificationTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); nsQuota_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); dsQuota_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); permission_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); if (aclBuilder_ == null) { acl_ = null; } else { aclBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); if (xAttrsBuilder_ == null) { xAttrs_ = null; } else { xAttrsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000020); if (typeQuotasBuilder_ == null) { typeQuotas_ = null; } else { typeQuotasBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory buildPartial() { 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.modificationTime_ = modificationTime_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.nsQuota_ = nsQuota_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.dsQuota_ = dsQuota_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.permission_ = permission_; to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { if (aclBuilder_ == null) { result.acl_ = acl_; } else { result.acl_ = aclBuilder_.build(); } to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { if (xAttrsBuilder_ == null) { result.xAttrs_ = xAttrs_; } else { result.xAttrs_ = xAttrsBuilder_.build(); } to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { if (typeQuotasBuilder_ == null) { result.typeQuotas_ = typeQuotas_; } else { result.typeQuotas_ = typeQuotasBuilder_.build(); } to_bitField0_ |= 0x00000040; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance()) return this; if (other.hasModificationTime()) { setModificationTime(other.getModificationTime()); } if (other.hasNsQuota()) { setNsQuota(other.getNsQuota()); } if (other.hasDsQuota()) { setDsQuota(other.getDsQuota()); } if (other.hasPermission()) { setPermission(other.getPermission()); } if (other.hasAcl()) { mergeAcl(other.getAcl()); } if (other.hasXAttrs()) { mergeXAttrs(other.getXAttrs()); } if (other.hasTypeQuotas()) { mergeTypeQuotas(other.getTypeQuotas()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (hasXAttrs()) { if (!getXAttrs().isInitialized()) { return false; } } if 
(hasTypeQuotas()) { if (!getTypeQuotas().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long modificationTime_ ; /** * optional uint64 modificationTime = 1; */ public boolean hasModificationTime() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 modificationTime = 1; */ public long getModificationTime() { return modificationTime_; } /** * optional uint64 modificationTime = 1; */ public Builder setModificationTime(long value) { bitField0_ |= 0x00000001; modificationTime_ = value; onChanged(); return this; } /** * optional uint64 modificationTime = 1; */ public Builder clearModificationTime() { bitField0_ = (bitField0_ & ~0x00000001); modificationTime_ = 0L; onChanged(); return this; } private long nsQuota_ ; /** *
         * <pre>
         * namespace quota
         * </pre>
         *
         * optional uint64 nsQuota = 2;
         */
        public boolean hasNsQuota() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <pre>
         * namespace quota
         * </pre>
         *
         * optional uint64 nsQuota = 2;
         */
        public long getNsQuota() {
          return nsQuota_;
        }
        /**
         * <pre>
         * namespace quota
         * </pre>
         *
         * optional uint64 nsQuota = 2;
         */
        public Builder setNsQuota(long value) {
          bitField0_ |= 0x00000002;
          nsQuota_ = value;
          onChanged();
          return this;
        }
        /**
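         * <p>Illustrative note: the matching {@code clearNsQuota()} below resets
         * the presence bit, so {@code hasNsQuota()} returns false and
         * {@code getNsQuota()} returns 0 afterwards.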
         * <pre>
         * namespace quota
         * </pre>
         *
         * optional uint64 nsQuota = 2;
         */
        public Builder clearNsQuota() {
          bitField0_ = (bitField0_ & ~0x00000002);
          nsQuota_ = 0L;
          onChanged();
          return this;
        }
        private long dsQuota_ ;
        /**
         * <pre>
         * diskspace quota
         * </pre>
         *
         * optional uint64 dsQuota = 3;
         */
        public boolean hasDsQuota() {
          return ((bitField0_ & 0x00000004) != 0);
        }
        /**
         * <pre>
         * diskspace quota
         * </pre>
         *
         * optional uint64 dsQuota = 3;
         */
        public long getDsQuota() {
          return dsQuota_;
        }
        /**
         * <pre>
         * diskspace quota
         * </pre>
         *
         * optional uint64 dsQuota = 3;
         */
        public Builder setDsQuota(long value) {
          bitField0_ |= 0x00000004;
          dsQuota_ = value;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * diskspace quota
         * </pre>
* * optional uint64 dsQuota = 3; */ public Builder clearDsQuota() { bitField0_ = (bitField0_ & ~0x00000004); dsQuota_ = 0L; onChanged(); return this; } private long permission_ ; /** * optional fixed64 permission = 4; */ public boolean hasPermission() { return ((bitField0_ & 0x00000008) != 0); } /** * optional fixed64 permission = 4; */ public long getPermission() { return permission_; } /** * optional fixed64 permission = 4; */ public Builder setPermission(long value) { bitField0_ |= 0x00000008; permission_ = value; onChanged(); return this; } /** * optional fixed64 permission = 4; */ public Builder clearPermission() { bitField0_ = (bitField0_ & ~0x00000008); permission_ = 0L; onChanged(); return this; } private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto acl_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder> aclBuilder_; /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ public boolean hasAcl() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl() { if (aclBuilder_ == null) { return acl_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_; } else { return aclBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ public Builder setAcl(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto value) { if (aclBuilder_ == null) { if (value == null) { throw new NullPointerException(); } acl_ = value; onChanged(); } else { aclBuilder_.setMessage(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ public Builder setAcl( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder builderForValue) { if (aclBuilder_ == null) { acl_ = builderForValue.build(); onChanged(); } else { aclBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ public Builder mergeAcl(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto value) { if (aclBuilder_ == null) { if (((bitField0_ & 0x00000010) != 0) && acl_ != null && acl_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance()) { acl_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.newBuilder(acl_).mergeFrom(value).buildPartial(); } else { acl_ = value; } onChanged(); } else { aclBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ public Builder clearAcl() { if (aclBuilder_ == null) { acl_ = null; onChanged(); } else { aclBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder getAclBuilder() { bitField0_ |= 0x00000010; 
onChanged(); return getAclFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder() { if (aclBuilder_ != null) { return aclBuilder_.getMessageOrBuilder(); } else { return acl_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_; } } /** * optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder> getAclFieldBuilder() { if (aclBuilder_ == null) { aclBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder>( getAcl(), getParentForChildren(), isClean()); acl_ = null; } return aclBuilder_; } private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto xAttrs_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder> xAttrsBuilder_; /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ public boolean hasXAttrs() { return ((bitField0_ & 0x00000020) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs() { if (xAttrsBuilder_ == null) { return xAttrs_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_; } else { return xAttrsBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ public Builder setXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto value) { if (xAttrsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } xAttrs_ = value; onChanged(); } else { xAttrsBuilder_.setMessage(value); } bitField0_ |= 0x00000020; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ public Builder setXAttrs( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder builderForValue) { if (xAttrsBuilder_ == null) { xAttrs_ = builderForValue.build(); onChanged(); } else { xAttrsBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000020; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ public Builder mergeXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto value) { if (xAttrsBuilder_ == null) { if (((bitField0_ & 0x00000020) != 0) && xAttrs_ != null && xAttrs_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance()) { xAttrs_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.newBuilder(xAttrs_).mergeFrom(value).buildPartial(); } else { xAttrs_ = value; } onChanged(); } else { xAttrsBuilder_.mergeFrom(value); } bitField0_ |= 0x00000020; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ public Builder clearXAttrs() { if (xAttrsBuilder_ == null) { xAttrs_ = null; onChanged(); } else { xAttrsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000020); return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder getXAttrsBuilder() { bitField0_ |= 0x00000020; onChanged(); return getXAttrsFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder() { if (xAttrsBuilder_ != null) { return xAttrsBuilder_.getMessageOrBuilder(); } else { return xAttrs_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_; } } /** * optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder> getXAttrsFieldBuilder() { if (xAttrsBuilder_ == null) { xAttrsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder>( getXAttrs(), getParentForChildren(), isClean()); xAttrs_ = null; } return xAttrsBuilder_; } private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder> typeQuotasBuilder_; /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ public boolean hasTypeQuotas() { return ((bitField0_ & 0x00000040) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getTypeQuotas() { if (typeQuotasBuilder_ == null) { return typeQuotas_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance() : typeQuotas_; } else { return typeQuotasBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ public Builder setTypeQuotas(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto value) { if (typeQuotasBuilder_ == null) { if (value == null) { throw new NullPointerException(); } typeQuotas_ = value; onChanged(); } else { typeQuotasBuilder_.setMessage(value); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ public Builder setTypeQuotas( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder builderForValue) { if (typeQuotasBuilder_ == null) { typeQuotas_ = builderForValue.build(); onChanged(); } else { typeQuotasBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ public Builder mergeTypeQuotas(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto value) { if (typeQuotasBuilder_ == null) { if (((bitField0_ & 0x00000040) != 0) && typeQuotas_ != null && typeQuotas_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance()) { typeQuotas_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.newBuilder(typeQuotas_).mergeFrom(value).buildPartial(); } else { typeQuotas_ = value; } onChanged(); } else { typeQuotasBuilder_.mergeFrom(value); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ public Builder clearTypeQuotas() { if (typeQuotasBuilder_ == null) { typeQuotas_ = null; onChanged(); } else { typeQuotasBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder getTypeQuotasBuilder() { bitField0_ |= 0x00000040; onChanged(); return getTypeQuotasFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder getTypeQuotasOrBuilder() { if (typeQuotasBuilder_ != null) { return typeQuotasBuilder_.getMessageOrBuilder(); } else { return typeQuotas_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance() : typeQuotas_; } } /** * optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder> getTypeQuotasFieldBuilder() { if (typeQuotasBuilder_ == null) { typeQuotasBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder>( getTypeQuotas(), getParentForChildren(), isClean()); typeQuotas_ = null; } return typeQuotasBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.INodeDirectory) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.INodeDirectory) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public INodeDirectory parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new INodeDirectory(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface INodeSymlinkOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.INodeSymlink) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional fixed64 permission = 1; */ boolean hasPermission(); /** * optional fixed64 permission = 1; */ long getPermission(); /** * optional bytes target = 2; */ boolean hasTarget(); /** * optional bytes target = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getTarget(); /** * optional uint64 modificationTime = 3; */ boolean hasModificationTime(); /** * optional uint64 
modificationTime = 3; */ long getModificationTime(); /** * optional uint64 accessTime = 4; */ boolean hasAccessTime(); /** * optional uint64 accessTime = 4; */ long getAccessTime(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeSymlink} */ public static final class INodeSymlink extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.INodeSymlink) INodeSymlinkOrBuilder { private static final long serialVersionUID = 0L; // Use INodeSymlink.newBuilder() to construct. private INodeSymlink(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private INodeSymlink() { target_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private INodeSymlink( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 9: { bitField0_ |= 0x00000001; permission_ = input.readFixed64(); break; } case 18: { bitField0_ |= 0x00000002; target_ = input.readBytes(); break; } case 24: { bitField0_ |= 0x00000004; modificationTime_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; accessTime_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder.class); } private int bitField0_; public static final int PERMISSION_FIELD_NUMBER = 1; private long permission_; /** * optional fixed64 permission = 1; */ public boolean hasPermission() { return ((bitField0_ & 0x00000001) != 0); } /** * optional fixed64 permission = 1; */ public long getPermission() { return permission_; } public static final int TARGET_FIELD_NUMBER = 2; private org.apache.hadoop.thirdparty.protobuf.ByteString target_; /** * optional bytes target = 2; */ public boolean 
hasTarget() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bytes target = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getTarget() { return target_; } public static final int MODIFICATIONTIME_FIELD_NUMBER = 3; private long modificationTime_; /** * optional uint64 modificationTime = 3; */ public boolean hasModificationTime() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 modificationTime = 3; */ public long getModificationTime() { return modificationTime_; } public static final int ACCESSTIME_FIELD_NUMBER = 4; private long accessTime_; /** * optional uint64 accessTime = 4; */ public boolean hasAccessTime() { return ((bitField0_ & 0x00000008) != 0); } /** * optional uint64 accessTime = 4; */ public long getAccessTime() { return accessTime_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeFixed64(1, permission_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBytes(2, target_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, modificationTime_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt64(4, accessTime_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeFixed64Size(1, permission_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(2, target_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(3, modificationTime_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(4, accessTime_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink) obj; if (hasPermission() != other.hasPermission()) return false; if (hasPermission()) { if (getPermission() != other.getPermission()) return false; } if (hasTarget() != other.hasTarget()) return false; if (hasTarget()) { if (!getTarget() .equals(other.getTarget())) return false; } if (hasModificationTime() != other.hasModificationTime()) return false; if (hasModificationTime()) { if (getModificationTime() != other.getModificationTime()) return false; } if (hasAccessTime() != other.hasAccessTime()) return false; if (hasAccessTime()) { if (getAccessTime() != other.getAccessTime()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); 
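// Hashing scheme used throughout these generated messages: each field that is
// set folds in as (37 * hash) + FIELD_NUMBER followed by (53 * hash) + the
// field's value hash; long-valued fields are reduced to int by
// Internal.hashLong, i.e. (int) (value ^ (value >>> 32)). Unknown fields
// contribute at the end via unknownFields.hashCode(), and the result is memoized.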
if (hasPermission()) { hash = (37 * hash) + PERMISSION_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getPermission()); } if (hasTarget()) { hash = (37 * hash) + TARGET_FIELD_NUMBER; hash = (53 * hash) + getTarget().hashCode(); } if (hasModificationTime()) { hash = (37 * hash) + MODIFICATIONTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getModificationTime()); } if (hasAccessTime()) { hash = (37 * hash) + ACCESSTIME_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getAccessTime()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseDelimitedFrom( java.io.InputStream input, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeSymlink} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.INodeSymlink) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); permission_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); target_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); modificationTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); accessTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); return this; } @java.lang.Override 
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.permission_ = permission_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.target_ = target_; if (((from_bitField0_ & 0x00000004) != 0)) { result.modificationTime_ = modificationTime_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.accessTime_ = accessTime_; to_bitField0_ |= 0x00000008; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance()) return this; if (other.hasPermission()) { setPermission(other.getPermission()); } if (other.hasTarget()) { setTarget(other.getTarget()); } if (other.hasModificationTime()) { setModificationTime(other.getModificationTime()); } if (other.hasAccessTime()) { setAccessTime(other.getAccessTime()); } this.mergeUnknownFields(other.unknownFields); 
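// Merge semantics for these optional scalar/bytes fields: any field set on
// `other` overwrites this builder's current value (last writer wins); fields
// unset on `other` are left untouched, and unknown fields are concatenated by
// the mergeUnknownFields call above.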
onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long permission_ ; /** * optional fixed64 permission = 1; */ public boolean hasPermission() { return ((bitField0_ & 0x00000001) != 0); } /** * optional fixed64 permission = 1; */ public long getPermission() { return permission_; } /** * optional fixed64 permission = 1; */ public Builder setPermission(long value) { bitField0_ |= 0x00000001; permission_ = value; onChanged(); return this; } /** * optional fixed64 permission = 1; */ public Builder clearPermission() { bitField0_ = (bitField0_ & ~0x00000001); permission_ = 0L; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString target_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes target = 2; */ public boolean hasTarget() { return ((bitField0_ & 0x00000002) != 0); } /** * optional bytes target = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getTarget() { return target_; } /** * optional bytes target = 2; */ public Builder setTarget(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; target_ = value; onChanged(); return this; } /** * optional bytes target = 2; */ public Builder clearTarget() { bitField0_ = (bitField0_ & ~0x00000002); target_ = getDefaultInstance().getTarget(); onChanged(); return this; } private long modificationTime_ ; /** * optional uint64 modificationTime = 3; */ public boolean hasModificationTime() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint64 modificationTime = 3; */ public long getModificationTime() { return modificationTime_; } /** * optional uint64 modificationTime = 3; */ public Builder setModificationTime(long value) { bitField0_ |= 0x00000004; modificationTime_ = value; onChanged(); return this; } /** * optional uint64 modificationTime = 3; */ public Builder clearModificationTime() { bitField0_ = (bitField0_ & ~0x00000004); modificationTime_ = 0L; onChanged(); return this; } private long accessTime_ ; /** * optional uint64 accessTime = 4; */ public boolean hasAccessTime() { return ((bitField0_ & 0x00000008) != 0); } /** * optional uint64 accessTime = 4; */ public long getAccessTime() { return accessTime_; } /** * optional uint64 accessTime = 4; */ public Builder setAccessTime(long value) { bitField0_ |= 0x00000008; accessTime_ = value; onChanged(); return this; } /** * optional uint64 accessTime = 4; */ public Builder clearAccessTime() { bitField0_ = (bitField0_ & ~0x00000008); accessTime_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } 
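/*
 * Usage sketch (illustrative, not part of the generated file; the target path
 * and timestamp values below are made up): round-tripping an INodeSymlink
 * through the builder and parser API defined in this class.
 *
 *   FsImageProto.INodeSection.INodeSymlink link =
 *       FsImageProto.INodeSection.INodeSymlink.newBuilder()
 *           .setPermission(0x1edL)      // packed permission word, example value
 *           .setTarget(org.apache.hadoop.thirdparty.protobuf.ByteString
 *               .copyFromUtf8("/data/current"))
 *           .setModificationTime(1700000000000L)
 *           .setAccessTime(1700000000000L)
 *           .build();                   // cannot throw here: all four fields are optional
 *   byte[] wire = link.toByteArray();
 *   FsImageProto.INodeSection.INodeSymlink parsed =
 *       FsImageProto.INodeSection.INodeSymlink.parseFrom(wire);
 *   assert parsed.equals(link);
 */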
@java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.INodeSymlink) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.INodeSymlink) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public INodeSymlink parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new INodeSymlink(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface INodeOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.INode) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1; */ boolean hasType(); /** * required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type getType(); /** * required uint64 id = 2; */ boolean hasId(); /** * required uint64 id = 2; */ long getId(); /** * optional bytes name = 3; */ boolean hasName(); /** * optional bytes name = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getName(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ boolean hasFile(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getFile(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getFileOrBuilder(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ boolean hasDirectory(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDirectory(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getDirectoryOrBuilder(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ boolean hasSymlink(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getSymlink(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder getSymlinkOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INode} */ public static final class INode extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.INode) INodeOrBuilder { private static final long serialVersionUID = 0L; // Use INode.newBuilder() to construct. private INode(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private INode() { type_ = 1; name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private INode( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type value = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; type_ = rawValue; } break; } case 16: { bitField0_ |= 0x00000002; id_ = input.readUInt64(); break; } case 26: { bitField0_ |= 0x00000004; name_ = input.readBytes(); break; } case 34: { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder subBuilder = null; if (((bitField0_ & 0x00000008) != 0)) { subBuilder = file_.toBuilder(); } file_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(file_); file_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } case 42: { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder subBuilder = null; if (((bitField0_ & 0x00000010) != 0)) { subBuilder = directory_.toBuilder(); } directory_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(directory_); directory_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000010; break; } case 50: { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder subBuilder = null; if (((bitField0_ & 0x00000020) != 0)) { subBuilder = symlink_.toBuilder(); } symlink_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(symlink_); symlink_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000020; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw 
e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder.class); } /** * Protobuf enum {@code hadoop.hdfs.fsimage.INodeSection.INode.Type} */ public enum Type implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * FILE = 1; */ FILE(1), /** * DIRECTORY = 2; */ DIRECTORY(2), /** * SYMLINK = 3; */ SYMLINK(3), ; /** * FILE = 1; */ public static final int FILE_VALUE = 1; /** * DIRECTORY = 2; */ public static final int DIRECTORY_VALUE = 2; /** * SYMLINK = 3; */ public static final int SYMLINK_VALUE = 3; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static Type valueOf(int value) { return forNumber(value); } public static Type forNumber(int value) { switch (value) { case 1: return FILE; case 2: return DIRECTORY; case 3: return SYMLINK; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< Type> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public Type findValueByNumber(int number) { return Type.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDescriptor().getEnumTypes().get(0); } private static final Type[] VALUES = values(); public static Type valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private Type(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.fsimage.INodeSection.INode.Type) } private int bitField0_; public static final int TYPE_FIELD_NUMBER = 1; private int type_; /** * required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1; */ public boolean hasType() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type getType() { 
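// type_ holds the raw wire number. Type.valueOf(...) (a deprecated alias of
// forNumber) returns null for numbers this schema does not know; the getter
// then falls back to FILE. Note the parser never stores such numbers here:
// unrecognized enum values are routed into unknownFields during parsing
// (see the case 8 branch above).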
@SuppressWarnings("deprecation") org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type result = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.valueOf(type_); return result == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.FILE : result; } public static final int ID_FIELD_NUMBER = 2; private long id_; /** * required uint64 id = 2; */ public boolean hasId() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 id = 2; */ public long getId() { return id_; } public static final int NAME_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString name_; /** * optional bytes name = 3; */ public boolean hasName() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bytes name = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getName() { return name_; } public static final int FILE_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile file_; /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ public boolean hasFile() { return ((bitField0_ & 0x00000008) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getFile() { return file_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : file_; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getFileOrBuilder() { return file_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : file_; } public static final int DIRECTORY_FIELD_NUMBER = 5; private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory directory_; /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ public boolean hasDirectory() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDirectory() { return directory_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : directory_; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getDirectoryOrBuilder() { return directory_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : directory_; } public static final int SYMLINK_FIELD_NUMBER = 6; private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink symlink_; /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ public boolean hasSymlink() { return ((bitField0_ & 0x00000020) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getSymlink() { return symlink_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance() : symlink_; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder getSymlinkOrBuilder() { return symlink_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance() : symlink_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasType()) { memoizedIsInitialized = 0; return false; } if (!hasId()) { memoizedIsInitialized = 0; return false; } if (hasFile()) { if (!getFile().isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasDirectory()) { if (!getDirectory().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, type_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, id_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBytes(3, name_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeMessage(4, getFile()); } if (((bitField0_ & 0x00000010) != 0)) { output.writeMessage(5, getDirectory()); } if (((bitField0_ & 0x00000020) != 0)) { output.writeMessage(6, getSymlink()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, type_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, id_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, name_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, getFile()); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(5, getDirectory()); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(6, getSymlink()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode) obj; if (hasType() != other.hasType()) return false; if (hasType()) { if (type_ != other.type_) return false; } if (hasId() != other.hasId()) return false; if (hasId()) { if (getId() != other.getId()) return false; } if (hasName() != other.hasName()) return false; if (hasName()) { if (!getName() .equals(other.getName())) return false; } if (hasFile() != other.hasFile()) return false; if (hasFile()) { if (!getFile() .equals(other.getFile())) return false; } if (hasDirectory() != 
other.hasDirectory()) return false; if (hasDirectory()) { if (!getDirectory() .equals(other.getDirectory())) return false; } if (hasSymlink() != other.hasSymlink()) return false; if (hasSymlink()) { if (!getSymlink() .equals(other.getSymlink())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasType()) { hash = (37 * hash) + TYPE_FIELD_NUMBER; hash = (53 * hash) + type_; } if (hasId()) { hash = (37 * hash) + ID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getId()); } if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } if (hasFile()) { hash = (37 * hash) + FILE_FIELD_NUMBER; hash = (53 * hash) + getFile().hashCode(); } if (hasDirectory()) { hash = (37 * hash) + DIRECTORY_FIELD_NUMBER; hash = (53 * hash) + getDirectory().hashCode(); } if (hasSymlink()) { hash = (37 * hash) + SYMLINK_FIELD_NUMBER; hash = (53 * hash) + getSymlink().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, 
input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INode} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.INode) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getFileFieldBuilder(); 
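// When alwaysUseFieldBuilders is set (a debug/test mode of the generated
// code), the singular message fields eagerly create their SingleFieldBuilderV3
// instances here rather than lazily in getXxxFieldBuilder(), exercising the
// nested-builder code path.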
getDirectoryFieldBuilder(); getSymlinkFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); type_ = 1; bitField0_ = (bitField0_ & ~0x00000001); id_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); if (fileBuilder_ == null) { file_ = null; } else { fileBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); if (directoryBuilder_ == null) { directory_ = null; } else { directoryBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); if (symlinkBuilder_ == null) { symlink_ = null; } else { symlinkBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000020); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.type_ = type_; if (((from_bitField0_ & 0x00000002) != 0)) { result.id_ = id_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.name_ = name_; if (((from_bitField0_ & 0x00000008) != 0)) { if (fileBuilder_ == null) { result.file_ = file_; } else { result.file_ = fileBuilder_.build(); } to_bitField0_ |= 0x00000008; } if (((from_bitField0_ & 0x00000010) != 0)) { if (directoryBuilder_ == null) { result.directory_ = directory_; } else { result.directory_ = directoryBuilder_.build(); } to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { if (symlinkBuilder_ == null) { result.symlink_ = symlink_; } else { result.symlink_ = symlinkBuilder_.build(); } to_bitField0_ |= 0x00000020; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder 
addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance()) return this; if (other.hasType()) { setType(other.getType()); } if (other.hasId()) { setId(other.getId()); } if (other.hasName()) { setName(other.getName()); } if (other.hasFile()) { mergeFile(other.getFile()); } if (other.hasDirectory()) { mergeDirectory(other.getDirectory()); } if (other.hasSymlink()) { mergeSymlink(other.getSymlink()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasType()) { return false; } if (!hasId()) { return false; } if (hasFile()) { if (!getFile().isInitialized()) { return false; } } if (hasDirectory()) { if (!getDirectory().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int type_ = 1; /** * required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1; */ public boolean hasType() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type getType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type result = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.valueOf(type_); return result == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.FILE : result; } /** * required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1; */ public Builder setType(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; type_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1; */ public Builder clearType() { bitField0_ = (bitField0_ & ~0x00000001); type_ = 1; onChanged(); return this; } private long id_ ; /** * required uint64 id = 2; */ public boolean hasId() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint64 id = 2; */ public long getId() { return id_; } /** * required uint64 id = 2; */ public Builder setId(long value) { bitField0_ |= 0x00000002; id_ = value; onChanged(); return this; } /** * required uint64 id = 2; */ public Builder clearId() { bitField0_ = (bitField0_ & ~0x00000002); id_ = 0L; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes name = 3; */ public boolean hasName() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bytes name = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getName() { return name_; } /** * optional bytes name = 3; */ public Builder setName(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; name_ = value; onChanged(); return this; } /** * optional bytes name = 3; */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000004); name_ = getDefaultInstance().getName(); onChanged(); return this; } private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile file_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder> fileBuilder_; /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ public boolean hasFile() { return ((bitField0_ & 0x00000008) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getFile() { if (fileBuilder_ == null) { return file_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : file_; } else { return fileBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ public Builder setFile(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) { if (fileBuilder_ == null) { if (value == null) { throw new NullPointerException(); } file_ = value; onChanged(); } else { fileBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ public Builder setFile( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder builderForValue) { if (fileBuilder_ == null) { file_ = builderForValue.build(); onChanged(); } else { fileBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ public Builder mergeFile(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) { if (fileBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0) && file_ != null && file_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) { file_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder(file_).mergeFrom(value).buildPartial(); } else { file_ = value; } onChanged(); } else { fileBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ public Builder clearFile() { if (fileBuilder_ == null) { file_ = null; onChanged(); } else { fileBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder getFileBuilder() { bitField0_ |= 0x00000008; onChanged(); return getFileFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getFileOrBuilder() { if (fileBuilder_ != null) { return fileBuilder_.getMessageOrBuilder(); } else { return file_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : file_; } } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder> getFileFieldBuilder() { if (fileBuilder_ == null) { fileBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder>( getFile(), getParentForChildren(), isClean()); file_ = null; } return fileBuilder_; } private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory directory_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder> directoryBuilder_; /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ public boolean hasDirectory() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDirectory() { if (directoryBuilder_ == null) { return directory_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : directory_; } else { return directoryBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ public Builder setDirectory(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory value) { if (directoryBuilder_ == null) { if (value == null) { throw new NullPointerException(); } directory_ = value; onChanged(); } else { directoryBuilder_.setMessage(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ public Builder setDirectory( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder builderForValue) { if (directoryBuilder_ == null) { directory_ = builderForValue.build(); onChanged(); } else { directoryBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ public Builder mergeDirectory(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory value) { if (directoryBuilder_ == null) { if (((bitField0_ & 0x00000010) != 0) && directory_ != null && directory_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance()) { directory_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.newBuilder(directory_).mergeFrom(value).buildPartial(); } else { directory_ = value; } onChanged(); } else { directoryBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ public Builder clearDirectory() { if (directoryBuilder_ == null) { directory_ = null; onChanged(); } else { directoryBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder getDirectoryBuilder() { bitField0_ |= 0x00000010; onChanged(); return getDirectoryFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getDirectoryOrBuilder() { if (directoryBuilder_ != null) { return directoryBuilder_.getMessageOrBuilder(); } else { return directory_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : directory_; } } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder> getDirectoryFieldBuilder() { if (directoryBuilder_ == null) { directoryBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder>( getDirectory(), getParentForChildren(), isClean()); directory_ = null; } return directoryBuilder_; } private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink symlink_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder> symlinkBuilder_; /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ public boolean hasSymlink() { return ((bitField0_ & 0x00000020) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getSymlink() { if (symlinkBuilder_ == null) { return symlink_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance() : symlink_; } else { return symlinkBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ public Builder setSymlink(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink value) { if (symlinkBuilder_ == null) { if (value == null) { throw new NullPointerException(); } symlink_ = value; onChanged(); } else { symlinkBuilder_.setMessage(value); } bitField0_ |= 0x00000020; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ public Builder setSymlink( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder builderForValue) { if (symlinkBuilder_ == null) { symlink_ = builderForValue.build(); onChanged(); } else { symlinkBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000020; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ public Builder mergeSymlink(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink value) { if (symlinkBuilder_ == null) { if (((bitField0_ & 0x00000020) != 0) && symlink_ != null && symlink_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance()) { symlink_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.newBuilder(symlink_).mergeFrom(value).buildPartial(); } else { symlink_ = value; } onChanged(); } else { symlinkBuilder_.mergeFrom(value); } bitField0_ |= 0x00000020; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ public Builder clearSymlink() { if (symlinkBuilder_ == null) { symlink_ = null; onChanged(); } else { symlinkBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000020); return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder getSymlinkBuilder() { bitField0_ |= 0x00000020; onChanged(); return getSymlinkFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder getSymlinkOrBuilder() { if (symlinkBuilder_ != null) { return symlinkBuilder_.getMessageOrBuilder(); } else { return symlink_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance() : symlink_; } } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder> getSymlinkFieldBuilder() { if (symlinkBuilder_ == null) { symlinkBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder>( getSymlink(), getParentForChildren(), isClean()); symlink_ = null; } return symlinkBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.INode) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.INode) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public INode parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new INode(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private int bitField0_; public static final int LASTINODEID_FIELD_NUMBER = 1; private long lastInodeId_; /** * optional uint64 lastInodeId = 1; */ public boolean hasLastInodeId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 lastInodeId = 1; */ public long getLastInodeId() { return lastInodeId_; } public static final int NUMINODES_FIELD_NUMBER = 2; private long numInodes_; /** *
      * The number of repeated INode records that follow this section header.
     * 
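      * Reading sketch (illustrative; assumes an InputStream "in" positioned at
      * the start of an uncompressed INODE section, whose records are written
      * length-delimited):
      *   INodeSection header = INodeSection.parseDelimitedFrom(in);
      *   for (long i = 0; i < header.getNumInodes(); i++) {
      *     INodeSection.INode inode = INodeSection.INode.parseDelimitedFrom(in);
      *     switch (inode.getType()) {
      *       case FILE:      handle(inode.getFile()); break;
      *       case DIRECTORY: handle(inode.getDirectory()); break;
      *       case SYMLINK:   handle(inode.getSymlink()); break;
      *     }
      *   }
      * ("handle" stands in for caller-supplied processing.)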
* * optional uint64 numInodes = 2; */ public boolean hasNumInodes() { return ((bitField0_ & 0x00000002) != 0); } /** *
      * The number of repeated INode records that follow this section header.
     * 
* * optional uint64 numInodes = 2; */ public long getNumInodes() { return numInodes_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, lastInodeId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, numInodes_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, lastInodeId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, numInodes_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection) obj; if (hasLastInodeId() != other.hasLastInodeId()) return false; if (hasLastInodeId()) { if (getLastInodeId() != other.getLastInodeId()) return false; } if (hasNumInodes() != other.hasNumInodes()) return false; if (hasNumInodes()) { if (getNumInodes() != other.getNumInodes()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasLastInodeId()) { hash = (37 * hash) + LASTINODEID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLastInodeId()); } if (hasNumInodes()) { hash = (37 * hash) + NUMINODES_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNumInodes()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
      * Permission is serialized as a 64-bit long: [0:24):[24:48):[48:64) (in Big Endian).
      * The first and the second parts are the string ids of the user name and
      * group name, and the last 16 bits are the permission bits.
     * Name: INODE
     * 
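      * Unpacking sketch (illustrative; "perm" is a hypothetical local holding
      * such a 64-bit permission word):
      *   int userStringId  = (int) (perm >>> 40);              // bits [0:24)
      *   int groupStringId = (int) ((perm >>> 16) & 0xFFFFFF); // bits [24:48)
      *   short mode        = (short) (perm & 0xFFFF);          // bits [48:64)
      * The two string ids refer to interned user/group names stored in the
      * image's string table.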
* * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSectionOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); lastInodeId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); numInodes_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.lastInodeId_ = lastInodeId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.numInodes_ = numInodes_; to_bitField0_ |= 0x00000002; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return 
super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.getDefaultInstance()) return this; if (other.hasLastInodeId()) { setLastInodeId(other.getLastInodeId()); } if (other.hasNumInodes()) { setNumInodes(other.getNumInodes()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long lastInodeId_ ; /** * optional uint64 lastInodeId = 1; */ public boolean hasLastInodeId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 lastInodeId = 1; */ public long getLastInodeId() { return lastInodeId_; } /** * optional uint64 lastInodeId = 1; */ public Builder setLastInodeId(long value) { bitField0_ |= 0x00000001; lastInodeId_ = value; onChanged(); return this; } /** * optional uint64 lastInodeId = 1; */ public Builder clearLastInodeId() { bitField0_ = (bitField0_ & ~0x00000001); lastInodeId_ = 0L; onChanged(); return this; } private long numInodes_ ; /** *
        * The number of repeated INode records that follow this section header.
       * 
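        * Building sketch (hypothetical values), using only methods defined on
        * this Builder:
        *   INodeSection section = INodeSection.newBuilder()
        *       .setLastInodeId(16400L)
        *       .setNumInodes(17L)
        *       .build();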
* * optional uint64 numInodes = 2; */ public boolean hasNumInodes() { return ((bitField0_ & 0x00000002) != 0); } /** *
        * The number of repeated INode records that follow this section header.
       * 
* * optional uint64 numInodes = 2; */ public long getNumInodes() { return numInodes_; } /** *
        * The number of repeated INode records that follow this section header.
       * 
* * optional uint64 numInodes = 2; */ public Builder setNumInodes(long value) { bitField0_ |= 0x00000002; numInodes_ = value; onChanged(); return this; } /** *
        * The number of repeated INode records that follow this section header.
       * 
* * optional uint64 numInodes = 2; */ public Builder clearNumInodes() { bitField0_ = (bitField0_ & ~0x00000002); numInodes_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public INodeSection parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new INodeSection(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface FilesUnderConstructionSectionOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.FilesUnderConstructionSection) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { } /** *
   **
   * This section records information about under-construction files for
   * reconstructing the lease map.
   * NAME: FILES_UNDERCONSTRUCTION
   * 
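    * Reading sketch (illustrative; assumes an InputStream "in" bounded to this
    * uncompressed section): entries are length-delimited and read until the
    * bounded stream is exhausted, at which point parseDelimitedFrom returns
    * null.
    *   FileUnderConstructionEntry entry;
    *   while ((entry = FileUnderConstructionEntry.parseDelimitedFrom(in)) != null) {
    *     // entry.getInodeId() and entry.getFullPath() identify one open file
    *   }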
* * Protobuf type {@code hadoop.hdfs.fsimage.FilesUnderConstructionSection} */ public static final class FilesUnderConstructionSection extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.FilesUnderConstructionSection) FilesUnderConstructionSectionOrBuilder { private static final long serialVersionUID = 0L; // Use FilesUnderConstructionSection.newBuilder() to construct. private FilesUnderConstructionSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private FilesUnderConstructionSection() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FilesUnderConstructionSection( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.Builder.class); } public interface FileUnderConstructionEntryOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint64 inodeId = 1; */ boolean hasInodeId(); /** * optional uint64 inodeId = 1; */ long getInodeId(); /** * optional string fullPath = 2; */ boolean hasFullPath(); /** * optional string fullPath = 2; */ java.lang.String getFullPath(); /** * optional string fullPath = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getFullPathBytes(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry} */ public static final class FileUnderConstructionEntry extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry) 
FileUnderConstructionEntryOrBuilder { private static final long serialVersionUID = 0L; // Use FileUnderConstructionEntry.newBuilder() to construct. private FileUnderConstructionEntry(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private FileUnderConstructionEntry() { fullPath_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FileUnderConstructionEntry( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; inodeId_ = input.readUInt64(); break; } case 18: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; fullPath_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.Builder.class); } private int bitField0_; public static final int INODEID_FIELD_NUMBER = 1; private long inodeId_; /** * optional uint64 inodeId = 1; */ public boolean hasInodeId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 inodeId = 1; */ public long getInodeId() { return inodeId_; } public static final int FULLPATH_FIELD_NUMBER = 2; private volatile java.lang.Object fullPath_; /** * optional string fullPath = 2; */ public boolean hasFullPath() { return ((bitField0_ & 0x00000002) != 0); } /** * optional string fullPath = 2; */ public java.lang.String getFullPath() { java.lang.Object ref = fullPath_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { fullPath_ = s; } return s; } } /** * optional string 
fullPath = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getFullPathBytes() { java.lang.Object ref = fullPath_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fullPath_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, inodeId_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, fullPath_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, inodeId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, fullPath_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry) obj; if (hasInodeId() != other.hasInodeId()) return false; if (hasInodeId()) { if (getInodeId() != other.getInodeId()) return false; } if (hasFullPath() != other.hasFullPath()) return false; if (hasFullPath()) { if (!getFullPath() .equals(other.getFullPath())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasInodeId()) { hash = (37 * hash) + INODEID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getInodeId()); } if (hasFullPath()) { hash = (37 * hash) + FULLPATH_FIELD_NUMBER; hash = (53 * hash) + getFullPath().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static 
Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry) org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntryOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); inodeId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); fullPath_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } 
return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.inodeId_ = inodeId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.fullPath_ = fullPath_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.getDefaultInstance()) return this; if (other.hasInodeId()) { setInodeId(other.getInodeId()); } if (other.hasFullPath()) { bitField0_ |= 0x00000002; fullPath_ = other.fullPath_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long 
inodeId_ ; /** * optional uint64 inodeId = 1; */ public boolean hasInodeId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 inodeId = 1; */ public long getInodeId() { return inodeId_; } /** * optional uint64 inodeId = 1; */ public Builder setInodeId(long value) { bitField0_ |= 0x00000001; inodeId_ = value; onChanged(); return this; } /** * optional uint64 inodeId = 1; */ public Builder clearInodeId() { bitField0_ = (bitField0_ & ~0x00000001); inodeId_ = 0L; onChanged(); return this; } private java.lang.Object fullPath_ = ""; /** * optional string fullPath = 2; */ public boolean hasFullPath() { return ((bitField0_ & 0x00000002) != 0); } /** * optional string fullPath = 2; */ public java.lang.String getFullPath() { java.lang.Object ref = fullPath_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { fullPath_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string fullPath = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getFullPathBytes() { java.lang.Object ref = fullPath_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fullPath_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string fullPath = 2; */ public Builder setFullPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; fullPath_ = value; onChanged(); return this; } /** * optional string fullPath = 2; */ public Builder clearFullPath() { bitField0_ = (bitField0_ & ~0x00000002); fullPath_ = getDefaultInstance().getFullPath(); onChanged(); return this; } /** * optional string fullPath = 2; */ public Builder setFullPathBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; fullPath_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public FileUnderConstructionEntry parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new FileUnderConstructionEntry(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection) obj; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection 
parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
      * <pre>
      **
      * This section records information about under-construction files for
      * reconstructing the lease map.
      * NAME: FILES_UNDERCONSTRUCTION
      * </pre>
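      *
      * A minimal reading sketch, assuming the fsimage layout in which the
      * section body is a sequence of length-delimited
      * FileUnderConstructionEntry records ({@code in} is an InputStream
      * positioned at the section body; this framing comes from the HDFS
      * loader, not from this generated class):
      * <pre>{@code
      * FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry e;
      * while ((e = FsImageProto.FilesUnderConstructionSection
      *     .FileUnderConstructionEntry.parseDelimitedFrom(in)) != null) {
      *   System.out.println(e.getInodeId() + " " + e.getFullPath());
      * }
      * }</pre>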
* * Protobuf type {@code hadoop.hdfs.fsimage.FilesUnderConstructionSection} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.FilesUnderConstructionSection) org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSectionOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object 
value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.FilesUnderConstructionSection) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.FilesUnderConstructionSection) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public FilesUnderConstructionSection parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new FilesUnderConstructionSection(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection getDefaultInstanceForType() { return 
DEFAULT_INSTANCE; } } public interface INodeDirectorySectionOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeDirectorySection) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { } /** *
   * <pre>
   **
   * This section records the children of each directory.
   * NAME: INODE_DIR
   * </pre>
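   *
   * The section message itself declares no fields; a reading sketch, assuming
   * the fsimage layout in which the per-directory DirEntry records follow the
   * section header as length-delimited messages ({@code in} positioned at the
   * section body):
   * <pre>{@code
   * FsImageProto.INodeDirectorySection.DirEntry entry;
   * while ((entry = FsImageProto.INodeDirectorySection.DirEntry
   *     .parseDelimitedFrom(in)) != null) {
   *   long dir = entry.getParent();        // inode id of the directory
   *   int n = entry.getChildrenCount();    // ordinary child inode ids
   * }
   * }</pre>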
* * Protobuf type {@code hadoop.hdfs.fsimage.INodeDirectorySection} */ public static final class INodeDirectorySection extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeDirectorySection) INodeDirectorySectionOrBuilder { private static final long serialVersionUID = 0L; // Use INodeDirectorySection.newBuilder() to construct. private INodeDirectorySection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private INodeDirectorySection() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private INodeDirectorySection( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.Builder.class); } public interface DirEntryOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint64 parent = 1; */ boolean hasParent(); /** * optional uint64 parent = 1; */ long getParent(); /** *
        * <pre>
        * children that are not reference nodes
        * </pre>
        *
        * <code>repeated uint64 children = 2 [packed = true];</code>
        */
      java.util.List<java.lang.Long> getChildrenList();
      /**
        * <pre>
        * children that are not reference nodes
        * </pre>
        *
        * <code>repeated uint64 children = 2 [packed = true];</code>
        */
      int getChildrenCount();
      /**
        * <pre>
        * children that are not reference nodes
        * </pre>
        *
        * <code>repeated uint64 children = 2 [packed = true];</code>
        */
      long getChildren(int index);
      /**
        * <pre>
        * children that are reference nodes, each element is a reference node id
        * </pre>
        *
        * <code>repeated uint32 refChildren = 3 [packed = true];</code>
        */
      java.util.List<java.lang.Integer> getRefChildrenList();
      /**
        * <pre>
        * children that are reference nodes, each element is a reference node id
        * </pre>
        *
        * <code>repeated uint32 refChildren = 3 [packed = true];</code>
        */
      int getRefChildrenCount();
      /**
        * <pre>
        * children that are reference nodes, each element is a reference node id
        * </pre>
        *
        * <code>repeated uint32 refChildren = 3 [packed = true];</code>
        */
      int getRefChildren(int index);
    }
    /**
      * <pre>
      **
      * A single DirEntry needs to fit in the default PB max message size of
      * 64MB. Please be careful when adding more fields to a DirEntry!
      * </pre>
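      *
      * A minimal construction sketch (inode ids illustrative). Per the field
      * comments below, {@code refChildren} carries reference node ids rather
      * than inode ids; the 64MB cap above is also why a very large directory
      * may need to be split across several DirEntry records (how a writer
      * batches them is outside this file):
      * <pre>{@code
      * FsImageProto.INodeDirectorySection.DirEntry entry =
      *     FsImageProto.INodeDirectorySection.DirEntry.newBuilder()
      *         .setParent(16385L)       // inode id of this directory
      *         .addChildren(16386L)     // ordinary child inode id
      *         .addRefChildren(0)       // id of a reference-node record
      *         .build();
      * }</pre>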
* * Protobuf type {@code hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry} */ public static final class DirEntry extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry) DirEntryOrBuilder { private static final long serialVersionUID = 0L; // Use DirEntry.newBuilder() to construct. private DirEntry(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DirEntry() { children_ = emptyLongList(); refChildren_ = emptyIntList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DirEntry( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; parent_ = input.readUInt64(); break; } case 16: { if (!((mutable_bitField0_ & 0x00000002) != 0)) { children_ = newLongList(); mutable_bitField0_ |= 0x00000002; } children_.addLong(input.readUInt64()); break; } case 18: { int length = input.readRawVarint32(); int limit = input.pushLimit(length); if (!((mutable_bitField0_ & 0x00000002) != 0) && input.getBytesUntilLimit() > 0) { children_ = newLongList(); mutable_bitField0_ |= 0x00000002; } while (input.getBytesUntilLimit() > 0) { children_.addLong(input.readUInt64()); } input.popLimit(limit); break; } case 24: { if (!((mutable_bitField0_ & 0x00000004) != 0)) { refChildren_ = newIntList(); mutable_bitField0_ |= 0x00000004; } refChildren_.addInt(input.readUInt32()); break; } case 26: { int length = input.readRawVarint32(); int limit = input.pushLimit(length); if (!((mutable_bitField0_ & 0x00000004) != 0) && input.getBytesUntilLimit() > 0) { refChildren_ = newIntList(); mutable_bitField0_ |= 0x00000004; } while (input.getBytesUntilLimit() > 0) { refChildren_.addInt(input.readUInt32()); } input.popLimit(limit); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000002) != 0)) { children_.makeImmutable(); // C } if (((mutable_bitField0_ & 0x00000004) != 0)) { refChildren_.makeImmutable(); // C } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.Builder.class); } private int bitField0_; public static final int PARENT_FIELD_NUMBER = 1; private long parent_; /** * optional uint64 parent = 1; */ public boolean hasParent() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 parent = 1; */ public long getParent() { return parent_; } public static final int CHILDREN_FIELD_NUMBER = 2; private org.apache.hadoop.thirdparty.protobuf.Internal.LongList children_; /** *
        * <pre>
        * children that are not reference nodes
        * </pre>
        *
        * <code>repeated uint64 children = 2 [packed = true];</code>
        */
      public java.util.List<java.lang.Long>
          getChildrenList() {
        return children_;
      }
      /**
        * <pre>
        * children that are not reference nodes
        * </pre>
        *
        * <code>repeated uint64 children = 2 [packed = true];</code>
        */
      public int getChildrenCount() {
        return children_.size();
      }
      /**
        * <pre>
        * children that are not reference nodes
        * </pre>
        *
        * <code>repeated uint64 children = 2 [packed = true];</code>
        */
      public long getChildren(int index) {
        return children_.getLong(index);
      }
      private int childrenMemoizedSerializedSize = -1;
      public static final int REFCHILDREN_FIELD_NUMBER = 3;
      private org.apache.hadoop.thirdparty.protobuf.Internal.IntList refChildren_;
      /**
        * <pre>
        * children that are reference nodes, each element is a reference node id
        * </pre>
        *
        * <code>repeated uint32 refChildren = 3 [packed = true];</code>
        */
      public java.util.List<java.lang.Integer>
          getRefChildrenList() {
        return refChildren_;
      }
      /**
        * <pre>
        * children that are reference nodes, each element is a reference node id
        * </pre>
        *
        * <code>repeated uint32 refChildren = 3 [packed = true];</code>
        */
      public int getRefChildrenCount() {
        return refChildren_.size();
      }
      /**
        * <pre>
        * children that are reference nodes, each element is a reference node id
        * </pre>
* * repeated uint32 refChildren = 3 [packed = true]; */ public int getRefChildren(int index) { return refChildren_.getInt(index); } private int refChildrenMemoizedSerializedSize = -1; private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, parent_); } if (getChildrenList().size() > 0) { output.writeUInt32NoTag(18); output.writeUInt32NoTag(childrenMemoizedSerializedSize); } for (int i = 0; i < children_.size(); i++) { output.writeUInt64NoTag(children_.getLong(i)); } if (getRefChildrenList().size() > 0) { output.writeUInt32NoTag(26); output.writeUInt32NoTag(refChildrenMemoizedSerializedSize); } for (int i = 0; i < refChildren_.size(); i++) { output.writeUInt32NoTag(refChildren_.getInt(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, parent_); } { int dataSize = 0; for (int i = 0; i < children_.size(); i++) { dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64SizeNoTag(children_.getLong(i)); } size += dataSize; if (!getChildrenList().isEmpty()) { size += 1; size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt32SizeNoTag(dataSize); } childrenMemoizedSerializedSize = dataSize; } { int dataSize = 0; for (int i = 0; i < refChildren_.size(); i++) { dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32SizeNoTag(refChildren_.getInt(i)); } size += dataSize; if (!getRefChildrenList().isEmpty()) { size += 1; size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt32SizeNoTag(dataSize); } refChildrenMemoizedSerializedSize = dataSize; } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry) obj; if (hasParent() != other.hasParent()) return false; if (hasParent()) { if (getParent() != other.getParent()) return false; } if (!getChildrenList() .equals(other.getChildrenList())) return false; if (!getRefChildrenList() .equals(other.getRefChildrenList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasParent()) { hash = (37 * hash) + PARENT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getParent()); } if (getChildrenCount() > 0) { hash = (37 * hash) + CHILDREN_FIELD_NUMBER; hash = (53 * hash) + getChildrenList().hashCode(); } if (getRefChildrenCount() > 0) { hash = (37 * hash) + 
REFCHILDREN_FIELD_NUMBER; hash = (53 * hash) + getRefChildrenList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
.parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
        * <pre>
        **
        * A single DirEntry needs to fit in the default PB max message size of
        * 64MB. Please be careful when adding more fields to a DirEntry!
        * </pre>
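        *
        * A bulk-population sketch using the Builder below (values
        * illustrative; {@code addAllChildren} accepts any
        * {@code Iterable<? extends Long>}):
        * <pre>{@code
        * java.util.List<java.lang.Long> kids =
        *     java.util.Arrays.asList(16386L, 16387L, 16388L);
        * FsImageProto.INodeDirectorySection.DirEntry e =
        *     FsImageProto.INodeDirectorySection.DirEntry.newBuilder()
        *         .setParent(16385L)
        *         .addAllChildren(kids)
        *         .build();
        * }</pre>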
* * Protobuf type {@code hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntryOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); parent_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); children_ = emptyLongList(); bitField0_ = (bitField0_ & ~0x00000002); refChildren_ = emptyIntList(); bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.parent_ = parent_; to_bitField0_ |= 0x00000001; } if (((bitField0_ & 0x00000002) != 0)) { children_.makeImmutable(); bitField0_ = (bitField0_ & ~0x00000002); } result.children_ = children_; if (((bitField0_ & 0x00000004) != 0)) { refChildren_.makeImmutable(); bitField0_ = (bitField0_ & ~0x00000004); } result.refChildren_ = refChildren_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override 
public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.getDefaultInstance()) return this; if (other.hasParent()) { setParent(other.getParent()); } if (!other.children_.isEmpty()) { if (children_.isEmpty()) { children_ = other.children_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureChildrenIsMutable(); children_.addAll(other.children_); } onChanged(); } if (!other.refChildren_.isEmpty()) { if (refChildren_.isEmpty()) { refChildren_ = other.refChildren_; bitField0_ = (bitField0_ & ~0x00000004); } else { ensureRefChildrenIsMutable(); refChildren_.addAll(other.refChildren_); } onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long parent_ ; /** * optional uint64 parent = 1; */ public boolean hasParent() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 parent = 1; */ public long getParent() { return parent_; } /** * optional uint64 parent = 1; */ public Builder setParent(long value) { bitField0_ |= 0x00000001; parent_ = value; onChanged(); return this; } /** * optional uint64 parent = 1; */ public Builder clearParent() { bitField0_ = (bitField0_ & ~0x00000001); parent_ = 0L; onChanged(); return this; } private 
org.apache.hadoop.thirdparty.protobuf.Internal.LongList children_ = emptyLongList(); private void ensureChildrenIsMutable() { if (!((bitField0_ & 0x00000002) != 0)) { children_ = mutableCopy(children_); bitField0_ |= 0x00000002; } } /** *
          * <pre>
          * children that are not reference nodes
          * </pre>
          *
          * <code>repeated uint64 children = 2 [packed = true];</code>
          */
        public java.util.List<java.lang.Long>
            getChildrenList() {
          return ((bitField0_ & 0x00000002) != 0) ?
                   java.util.Collections.unmodifiableList(children_) : children_;
        }
        /**
          * <pre>
          * children that are not reference nodes
          * </pre>
          *
          * <code>repeated uint64 children = 2 [packed = true];</code>
          */
        public int getChildrenCount() {
          return children_.size();
        }
        /**
          * <pre>
          * children that are not reference nodes
          * </pre>
          *
          * <code>repeated uint64 children = 2 [packed = true];</code>
          */
        public long getChildren(int index) {
          return children_.getLong(index);
        }
        /**
          * <pre>
          * children that are not reference nodes
          * </pre>
          *
          * <code>repeated uint64 children = 2 [packed = true];</code>
          */
        public Builder setChildren(
            int index, long value) {
          ensureChildrenIsMutable();
          children_.setLong(index, value);
          onChanged();
          return this;
        }
        /**
          * <pre>
          * children that are not reference nodes
          * </pre>
          *
          * <code>repeated uint64 children = 2 [packed = true];</code>
          */
        public Builder addChildren(long value) {
          ensureChildrenIsMutable();
          children_.addLong(value);
          onChanged();
          return this;
        }
        /**
          * <pre>
          * children that are not reference nodes
          * </pre>
          *
          * <code>repeated uint64 children = 2 [packed = true];</code>
          */
        public Builder addAllChildren(
            java.lang.Iterable<? extends java.lang.Long> values) {
          ensureChildrenIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, children_);
          onChanged();
          return this;
        }
        /**
          * <pre>
          * children that are not reference nodes
          * </pre>
          *
          * <code>repeated uint64 children = 2 [packed = true];</code>
          */
        public Builder clearChildren() {
          children_ = emptyLongList();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
          return this;
        }
        private org.apache.hadoop.thirdparty.protobuf.Internal.IntList refChildren_ = emptyIntList();
        private void ensureRefChildrenIsMutable() {
          if (!((bitField0_ & 0x00000004) != 0)) {
            refChildren_ = mutableCopy(refChildren_);
            bitField0_ |= 0x00000004;
          }
        }
        /**
          * <pre>
          * children that are reference nodes, each element is a reference node id
          * </pre>
          *
          * <code>repeated uint32 refChildren = 3 [packed = true];</code>
          */
        public java.util.List<java.lang.Integer>
            getRefChildrenList() {
          return ((bitField0_ & 0x00000004) != 0) ?
                   java.util.Collections.unmodifiableList(refChildren_) : refChildren_;
        }
        /**
          * <pre>
          * children that are reference nodes, each element is a reference node id
          * </pre>
          *
          * <code>repeated uint32 refChildren = 3 [packed = true];</code>
          */
        public int getRefChildrenCount() {
          return refChildren_.size();
        }
        /**
          * <pre>
          * children that are reference nodes, each element is a reference node id
          * </pre>
          *
          * <code>repeated uint32 refChildren = 3 [packed = true];</code>
          */
        public int getRefChildren(int index) {
          return refChildren_.getInt(index);
        }
        /**
          * <pre>
          * children that are reference nodes, each element is a reference node id
          * </pre>
          *
          * <code>repeated uint32 refChildren = 3 [packed = true];</code>
          */
        public Builder setRefChildren(
            int index, int value) {
          ensureRefChildrenIsMutable();
          refChildren_.setInt(index, value);
          onChanged();
          return this;
        }
        /**
          * <pre>
          * children that are reference nodes, each element is a reference node id
          * </pre>
          *
          * <code>repeated uint32 refChildren = 3 [packed = true];</code>
          */
        public Builder addRefChildren(int value) {
          ensureRefChildrenIsMutable();
          refChildren_.addInt(value);
          onChanged();
          return this;
        }
        /**
          * <pre>
          * children that are reference nodes, each element is a reference node id
          * </pre>
          *
          * <code>repeated uint32 refChildren = 3 [packed = true];</code>
          */
        public Builder addAllRefChildren(
            java.lang.Iterable<? extends java.lang.Integer> values) {
          ensureRefChildrenIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, refChildren_);
          onChanged();
          return this;
        }
        /**
          * <pre>
          * children that are reference nodes, each element is a reference node id
          * </pre>
* * repeated uint32 refChildren = 3 [packed = true]; */ public Builder clearRefChildren() { refChildren_ = emptyIntList(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DirEntry parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new DirEntry(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection) obj; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return 
newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
      * <pre>
      **
      * This section records the children of each directory.
      * NAME: INODE_DIR
      * </pre>
* * Protobuf type {@code hadoop.hdfs.fsimage.INodeDirectorySection} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeDirectorySection) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySectionOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeDirectorySection) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeDirectorySection) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public INodeDirectorySection parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new INodeDirectorySection(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface INodeReferenceSectionOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeReferenceSection) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { } /** * 
Protobuf type {@code hadoop.hdfs.fsimage.INodeReferenceSection} */ public static final class INodeReferenceSection extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeReferenceSection) INodeReferenceSectionOrBuilder { private static final long serialVersionUID = 0L; // Use INodeReferenceSection.newBuilder() to construct. private INodeReferenceSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private INodeReferenceSection() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private INodeReferenceSection( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.Builder.class); } public interface INodeReferenceOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
* <pre>
* id of the referred inode
* </pre>
* * optional uint64 referredId = 1; */ boolean hasReferredId(); /** *
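* A minimal read-side sketch, for illustration only ({@code ref} is an
* assumed, already-parsed {@code INodeReferenceSection.INodeReference};
* it is not part of the generated API):
* <pre>{@code
* if (ref.hasReferredId()) {
*   long inodeId = ref.getReferredId();  // id of the inode this reference points to
* }
* }</pre>
*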
* <pre>
* id of the referred inode
* </pre>
* * optional uint64 referredId = 1; */ long getReferredId(); /** *
* <pre>
* local name recorded in WithName
* </pre>
* * optional bytes name = 2; */ boolean hasName(); /** *
* <pre>
* local name recorded in WithName
* </pre>
* * optional bytes name = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getName(); /** *
* <pre>
* recorded in DstReference
* </pre>
* * optional uint32 dstSnapshotId = 3; */ boolean hasDstSnapshotId(); /** *
* <pre>
* recorded in DstReference
* </pre>
* * optional uint32 dstSnapshotId = 3; */ int getDstSnapshotId(); /** *
* <pre>
* recorded in WithName
* </pre>
* * optional uint32 lastSnapshotId = 4; */ boolean hasLastSnapshotId(); /** *
* <pre>
* recorded in WithName
* </pre>
* * optional uint32 lastSnapshotId = 4; */ int getLastSnapshotId(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference} */ public static final class INodeReference extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference) INodeReferenceOrBuilder { private static final long serialVersionUID = 0L; // Use INodeReference.newBuilder() to construct. private INodeReference(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private INodeReference() { name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private INodeReference( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; referredId_ = input.readUInt64(); break; } case 18: { bitField0_ |= 0x00000002; name_ = input.readBytes(); break; } case 24: { bitField0_ |= 0x00000004; dstSnapshotId_ = input.readUInt32(); break; } case 32: { bitField0_ |= 0x00000008; lastSnapshotId_ = input.readUInt32(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.Builder.class); } private int bitField0_; public static final int REFERREDID_FIELD_NUMBER = 1; private long referredId_; /** *
* <pre>
* id of the referred inode
* </pre>
* * optional uint64 referredId = 1; */ public boolean hasReferredId() { return ((bitField0_ & 0x00000001) != 0); } /** *
* <pre>
* id of the referred inode
* </pre>
* * optional uint64 referredId = 1; */ public long getReferredId() { return referredId_; } public static final int NAME_FIELD_NUMBER = 2; private org.apache.hadoop.thirdparty.protobuf.ByteString name_; /** *
* <pre>
* local name recorded in WithName
* </pre>
* * optional bytes name = 2; */ public boolean hasName() { return ((bitField0_ & 0x00000002) != 0); } /** *
* <pre>
* local name recorded in WithName
* </pre>
* * optional bytes name = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getName() { return name_; } public static final int DSTSNAPSHOTID_FIELD_NUMBER = 3; private int dstSnapshotId_; /** *
* <pre>
* recorded in DstReference
* </pre>
* * optional uint32 dstSnapshotId = 3; */ public boolean hasDstSnapshotId() { return ((bitField0_ & 0x00000004) != 0); } /** *
* <pre>
* recorded in DstReference
* </pre>
* * optional uint32 dstSnapshotId = 3; */ public int getDstSnapshotId() { return dstSnapshotId_; } public static final int LASTSNAPSHOTID_FIELD_NUMBER = 4; private int lastSnapshotId_; /** *
* <pre>
* recorded in WithName
* </pre>
* * optional uint32 lastSnapshotId = 4; */ public boolean hasLastSnapshotId() { return ((bitField0_ & 0x00000008) != 0); } /** *
* <pre>
* recorded in WithName
* </pre>
* * optional uint32 lastSnapshotId = 4; */ public int getLastSnapshotId() { return lastSnapshotId_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, referredId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeBytes(2, name_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt32(3, dstSnapshotId_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt32(4, lastSnapshotId_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, referredId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(2, name_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(3, dstSnapshotId_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(4, lastSnapshotId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference) obj; if (hasReferredId() != other.hasReferredId()) return false; if (hasReferredId()) { if (getReferredId() != other.getReferredId()) return false; } if (hasName() != other.hasName()) return false; if (hasName()) { if (!getName() .equals(other.getName())) return false; } if (hasDstSnapshotId() != other.hasDstSnapshotId()) return false; if (hasDstSnapshotId()) { if (getDstSnapshotId() != other.getDstSnapshotId()) return false; } if (hasLastSnapshotId() != other.hasLastSnapshotId()) return false; if (hasLastSnapshotId()) { if (getLastSnapshotId() != other.getLastSnapshotId()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasReferredId()) { hash = (37 * hash) + REFERREDID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getReferredId()); } if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } if (hasDstSnapshotId()) { hash = (37 * hash) + DSTSNAPSHOTID_FIELD_NUMBER; hash = (53 * hash) + getDstSnapshotId(); } if (hasLastSnapshotId()) { hash = (37 * hash) + LASTSNAPSHOTID_FIELD_NUMBER; hash = (53 * hash) + getLastSnapshotId(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference 
parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReferenceOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); referredId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); dstSnapshotId_ = 0; bitField0_ = (bitField0_ & ~0x00000004); lastSnapshotId_ = 0; bitField0_ = (bitField0_ & ~0x00000008); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.getDefaultInstance(); } @java.lang.Override public 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.referredId_ = referredId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.name_ = name_; if (((from_bitField0_ & 0x00000004) != 0)) { result.dstSnapshotId_ = dstSnapshotId_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.lastSnapshotId_ = lastSnapshotId_; to_bitField0_ |= 0x00000008; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.getDefaultInstance()) return this; if (other.hasReferredId()) { setReferredId(other.getReferredId()); } if (other.hasName()) { setName(other.getName()); } if (other.hasDstSnapshotId()) { setDstSnapshotId(other.getDstSnapshotId()); } if (other.hasLastSnapshotId()) { setLastSnapshotId(other.getLastSnapshotId()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference 
parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long referredId_ ; /** *
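* A build-side sketch, for illustration only; the field values below are
* invented, not taken from a real fsimage:
* <pre>{@code
* INodeReferenceSection.INodeReference ref =
*     INodeReferenceSection.INodeReference.newBuilder()
*         .setReferredId(16386L)                     // hypothetical inode id
*         .setName(org.apache.hadoop.thirdparty.protobuf.ByteString
*             .copyFromUtf8("renamed-dir"))          // hypothetical local name
*         .setLastSnapshotId(1)
*         .build();
* }</pre>
*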
* <pre>
* id of the referred inode
* </pre>
* * optional uint64 referredId = 1; */ public boolean hasReferredId() { return ((bitField0_ & 0x00000001) != 0); } /** *
* <pre>
* id of the referred inode
* </pre>
* * optional uint64 referredId = 1; */ public long getReferredId() { return referredId_; } /** *
* <pre>
* id of the referred inode
* </pre>
* * optional uint64 referredId = 1; */ public Builder setReferredId(long value) { bitField0_ |= 0x00000001; referredId_ = value; onChanged(); return this; } /** *
* <pre>
* id of the referred inode
* </pre>
* * optional uint64 referredId = 1; */ public Builder clearReferredId() { bitField0_ = (bitField0_ & ~0x00000001); referredId_ = 0L; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** *
* <pre>
* local name recorded in WithName
* </pre>
* * optional bytes name = 2; */ public boolean hasName() { return ((bitField0_ & 0x00000002) != 0); } /** *
* <pre>
* local name recorded in WithName
* </pre>
* * optional bytes name = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getName() { return name_; } /** *
* <pre>
* local name recorded in WithName
* </pre>
* * optional bytes name = 2; */ public Builder setName(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; name_ = value; onChanged(); return this; } /** *
* <pre>
* local name recorded in WithName
* </pre>
* * optional bytes name = 2; */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000002); name_ = getDefaultInstance().getName(); onChanged(); return this; } private int dstSnapshotId_ ; /** *
* <pre>
* recorded in DstReference
* </pre>
* * optional uint32 dstSnapshotId = 3; */ public boolean hasDstSnapshotId() { return ((bitField0_ & 0x00000004) != 0); } /** *
* <pre>
* recorded in DstReference
* </pre>
* * optional uint32 dstSnapshotId = 3; */ public int getDstSnapshotId() { return dstSnapshotId_; } /** *
* <pre>
* recorded in DstReference
* </pre>
* * optional uint32 dstSnapshotId = 3; */ public Builder setDstSnapshotId(int value) { bitField0_ |= 0x00000004; dstSnapshotId_ = value; onChanged(); return this; } /** *
* <pre>
* recorded in DstReference
* </pre>
* * optional uint32 dstSnapshotId = 3; */ public Builder clearDstSnapshotId() { bitField0_ = (bitField0_ & ~0x00000004); dstSnapshotId_ = 0; onChanged(); return this; } private int lastSnapshotId_ ; /** *
* <pre>
* recorded in WithName
* </pre>
* * optional uint32 lastSnapshotId = 4; */ public boolean hasLastSnapshotId() { return ((bitField0_ & 0x00000008) != 0); } /** *
* <pre>
* recorded in WithName
* </pre>
* * optional uint32 lastSnapshotId = 4; */ public int getLastSnapshotId() { return lastSnapshotId_; } /** *
* <pre>
* recorded in WithName
* </pre>
* * optional uint32 lastSnapshotId = 4; */ public Builder setLastSnapshotId(int value) { bitField0_ |= 0x00000008; lastSnapshotId_ = value; onChanged(); return this; } /** *
* <pre>
* recorded in WithName
* </pre>
* * optional uint32 lastSnapshotId = 4; */ public Builder clearLastSnapshotId() { bitField0_ = (bitField0_ & ~0x00000008); lastSnapshotId_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public INodeReference parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new INodeReference(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection) obj; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom( java.nio.ByteBuffer data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, 
extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.INodeReferenceSection} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeReferenceSection) org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSectionOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeReferenceSection) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeReferenceSection) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public INodeReferenceSection parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite 
extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new INodeReferenceSection(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotSectionOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotSection) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint32 snapshotCounter = 1; */ boolean hasSnapshotCounter(); /** * optional uint32 snapshotCounter = 1; */ int getSnapshotCounter(); /** * repeated uint64 snapshottableDir = 2 [packed = true]; */ java.util.List getSnapshottableDirList(); /** * repeated uint64 snapshottableDir = 2 [packed = true]; */ int getSnapshottableDirCount(); /** * repeated uint64 snapshottableDir = 2 [packed = true]; */ long getSnapshottableDir(int index); /** *
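* An iteration sketch over the packed {@code snapshottableDir} list, for
* illustration only ({@code section} is an assumed, already-parsed
* {@code SnapshotSection}):
* <pre>{@code
* for (int i = 0; i < section.getSnapshottableDirCount(); i++) {
*   long dirInodeId = section.getSnapshottableDir(i);  // inode id of a snapshottable directory
* }
* }</pre>
*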
* <pre>
* total number of snapshots
* </pre>
* * optional uint32 numSnapshots = 3; */ boolean hasNumSnapshots(); /** *
* <pre>
* total number of snapshots
* </pre>
* * optional uint32 numSnapshots = 3; */ int getNumSnapshots(); } /** *
* <pre>
**
* This section records the information about snapshots
* NAME: SNAPSHOT
* </pre>
* * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotSection} */ public static final class SnapshotSection extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotSection) SnapshotSectionOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotSection.newBuilder() to construct. private SnapshotSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotSection() { snapshottableDir_ = emptyLongList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshotSection( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; snapshotCounter_ = input.readUInt32(); break; } case 16: { if (!((mutable_bitField0_ & 0x00000002) != 0)) { snapshottableDir_ = newLongList(); mutable_bitField0_ |= 0x00000002; } snapshottableDir_.addLong(input.readUInt64()); break; } case 18: { int length = input.readRawVarint32(); int limit = input.pushLimit(length); if (!((mutable_bitField0_ & 0x00000002) != 0) && input.getBytesUntilLimit() > 0) { snapshottableDir_ = newLongList(); mutable_bitField0_ |= 0x00000002; } while (input.getBytesUntilLimit() > 0) { snapshottableDir_.addLong(input.readUInt64()); } input.popLimit(limit); break; } case 24: { bitField0_ |= 0x00000002; numSnapshots_ = input.readUInt32(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000002) != 0)) { snapshottableDir_.makeImmutable(); // C } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Builder.class); } public interface SnapshotOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotSection.Snapshot) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint32 snapshotId = 1; */ boolean hasSnapshotId(); /** * 
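* A parse sketch, for illustration only: in an fsimage file the offset,
* length and optional compression codec of this section come from
* {@code FileSummary.Section}; {@code in} below is an assumed
* {@code java.io.InputStream} already positioned at (and limited to)
* this section's decompressed bytes:
* <pre>{@code
* SnapshotSection section = SnapshotSection.parseDelimitedFrom(in);
* int count = section.getNumSnapshots();
* }</pre>
*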
optional uint32 snapshotId = 1; */ int getSnapshotId(); /** *
* <pre>
* Snapshot root
* </pre>
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ boolean hasRoot(); /** *
* <pre>
* Snapshot root
* </pre>
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getRoot(); /** *
* <pre>
* Snapshot root
* </pre>
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder getRootOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotSection.Snapshot} */ public static final class Snapshot extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotSection.Snapshot) SnapshotOrBuilder { private static final long serialVersionUID = 0L; // Use Snapshot.newBuilder() to construct. private Snapshot(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private Snapshot() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private Snapshot( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; snapshotId_ = input.readUInt32(); break; } case 18: { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder subBuilder = null; if (((bitField0_ & 0x00000002) != 0)) { subBuilder = root_.toBuilder(); } root_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(root_); root_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.Builder.class); } private int bitField0_; public static final int SNAPSHOTID_FIELD_NUMBER = 1; private int snapshotId_; /** * optional uint32 snapshotId = 1; */ public boolean hasSnapshotId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 snapshotId = 1; */ public int getSnapshotId() { return snapshotId_; } public static final int ROOT_FIELD_NUMBER = 2; private 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode root_; /** *
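* A read-side sketch for the snapshot root, for illustration only
* ({@code snapshot} is an assumed, already-parsed {@code Snapshot}):
* <pre>{@code
* if (snapshot.hasRoot()) {
*   FsImageProto.INodeSection.INode root = snapshot.getRoot();  // the snapshot's root directory inode
* }
* }</pre>
*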
* <pre>
* Snapshot root
* </pre>
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ public boolean hasRoot() { return ((bitField0_ & 0x00000002) != 0); } /** *
* <pre>
* Snapshot root
* </pre>
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getRoot() { return root_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance() : root_; } /** *
* <pre>
* Snapshot root
* </pre>
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder getRootOrBuilder() { return root_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance() : root_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (hasRoot()) { if (!getRoot().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, snapshotId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(2, getRoot()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, snapshotId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(2, getRoot()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot) obj; if (hasSnapshotId() != other.hasSnapshotId()) return false; if (hasSnapshotId()) { if (getSnapshotId() != other.getSnapshotId()) return false; } if (hasRoot() != other.hasRoot()) return false; if (hasRoot()) { if (!getRoot() .equals(other.getRoot())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSnapshotId()) { hash = (37 * hash) + SNAPSHOTID_FIELD_NUMBER; hash = (53 * hash) + getSnapshotId(); } if (hasRoot()) { hash = (37 * hash) + ROOT_FIELD_NUMBER; hash = (53 * hash) + getRoot().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotSection.Snapshot} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotSection.Snapshot) org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.SnapshotOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getRootFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); snapshotId_ = 0; bitField0_ = (bitField0_ & ~0x00000001); if (rootBuilder_ == null) { root_ = null; } else { rootBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.snapshotId_ = snapshotId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { if (rootBuilder_ == null) { result.root_ = root_; } else { result.root_ = rootBuilder_.build(); } to_bitField0_ |= 0x00000002; } result.bitField0_ = to_bitField0_; 
onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.getDefaultInstance()) return this; if (other.hasSnapshotId()) { setSnapshotId(other.getSnapshotId()); } if (other.hasRoot()) { mergeRoot(other.getRoot()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (hasRoot()) { if (!getRoot().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int snapshotId_ ; /** * optional uint32 snapshotId = 1; */ public boolean hasSnapshotId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 snapshotId = 1; */ public int getSnapshotId() { return snapshotId_; } /** * optional uint32 snapshotId = 1; */ public Builder setSnapshotId(int value) { bitField0_ |= 0x00000001; snapshotId_ = value; onChanged(); return this; } /** * optional uint32 snapshotId = 1; */ public Builder clearSnapshotId() { bitField0_ = (bitField0_ & ~0x00000001); snapshotId_ = 0; onChanged(); return this; } private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode root_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder, 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder> rootBuilder_; /** *
         * Snapshot root
         * 
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ public boolean hasRoot() { return ((bitField0_ & 0x00000002) != 0); } /** *
         * Snapshot root
         * 
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getRoot() { if (rootBuilder_ == null) { return root_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance() : root_; } else { return rootBuilder_.getMessage(); } } /** *
         * Snapshot root
         * 
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ public Builder setRoot(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode value) { if (rootBuilder_ == null) { if (value == null) { throw new NullPointerException(); } root_ = value; onChanged(); } else { rootBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** *
         * Snapshot root
         * 
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ public Builder setRoot( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder builderForValue) { if (rootBuilder_ == null) { root_ = builderForValue.build(); onChanged(); } else { rootBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** *
         * Snapshot root
         * 
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ public Builder mergeRoot(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode value) { if (rootBuilder_ == null) { if (((bitField0_ & 0x00000002) != 0) && root_ != null && root_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance()) { root_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.newBuilder(root_).mergeFrom(value).buildPartial(); } else { root_ = value; } onChanged(); } else { rootBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** *
         * Snapshot root
         * 
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ public Builder clearRoot() { if (rootBuilder_ == null) { root_ = null; onChanged(); } else { rootBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** *
         * Snapshot root
         * 
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder getRootBuilder() { bitField0_ |= 0x00000002; onChanged(); return getRootFieldBuilder().getBuilder(); } /** *
         * Snapshot root
         * 
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder getRootOrBuilder() { if (rootBuilder_ != null) { return rootBuilder_.getMessageOrBuilder(); } else { return root_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance() : root_; } } /** *
         * Snapshot root
         * 
* * optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder> getRootFieldBuilder() { if (rootBuilder_ == null) { rootBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder>( getRoot(), getParentForChildren(), isClean()); root_ = null; } return rootBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotSection.Snapshot) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotSection.Snapshot) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public Snapshot parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new Snapshot(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private int bitField0_; public static final int SNAPSHOTCOUNTER_FIELD_NUMBER = 1; private int snapshotCounter_; /** * optional uint32 snapshotCounter = 1; */ public boolean hasSnapshotCounter() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 snapshotCounter = 1; */ public int getSnapshotCounter() { return snapshotCounter_; } public static final int SNAPSHOTTABLEDIR_FIELD_NUMBER = 2; private org.apache.hadoop.thirdparty.protobuf.Internal.LongList snapshottableDir_; /** * repeated uint64 snapshottableDir = 2 [packed = true]; */ public java.util.List getSnapshottableDirList() { return snapshottableDir_; } /** * repeated uint64 snapshottableDir = 2 [packed = true]; */ public int getSnapshottableDirCount() { return snapshottableDir_.size(); } /** * repeated uint64 snapshottableDir = 2 [packed = true]; */ public long getSnapshottableDir(int index) { return snapshottableDir_.getLong(index); } private int snapshottableDirMemoizedSerializedSize = -1; 
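    // Illustrative usage sketch (not part of the generated file): the nested
    // Snapshot message above pairs an optional snapshotId with an optional root
    // INode. `rootINode` is a placeholder for an INodeSection.INode built
    // elsewhere, and the id value is made up.
    //
    //   FsImageProto.SnapshotSection.Snapshot snap =
    //       FsImageProto.SnapshotSection.Snapshot.newBuilder()
    //           .setSnapshotId(1)
    //           .setRoot(rootINode)  // omit to leave hasRoot() false
    //           .build();
    //   byte[] bytes = snap.toByteArray();
    //   FsImageProto.SnapshotSection.Snapshot copy =
    //       FsImageProto.SnapshotSection.Snapshot.parseFrom(bytes);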
public static final int NUMSNAPSHOTS_FIELD_NUMBER = 3; private int numSnapshots_; /** *
     * total number of snapshots
     * 
* * optional uint32 numSnapshots = 3; */ public boolean hasNumSnapshots() { return ((bitField0_ & 0x00000002) != 0); } /** *
     * total number of snapshots
     * 
* * optional uint32 numSnapshots = 3; */ public int getNumSnapshots() { return numSnapshots_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, snapshotCounter_); } if (getSnapshottableDirList().size() > 0) { output.writeUInt32NoTag(18); output.writeUInt32NoTag(snapshottableDirMemoizedSerializedSize); } for (int i = 0; i < snapshottableDir_.size(); i++) { output.writeUInt64NoTag(snapshottableDir_.getLong(i)); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(3, numSnapshots_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, snapshotCounter_); } { int dataSize = 0; for (int i = 0; i < snapshottableDir_.size(); i++) { dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64SizeNoTag(snapshottableDir_.getLong(i)); } size += dataSize; if (!getSnapshottableDirList().isEmpty()) { size += 1; size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt32SizeNoTag(dataSize); } snapshottableDirMemoizedSerializedSize = dataSize; } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(3, numSnapshots_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection) obj; if (hasSnapshotCounter() != other.hasSnapshotCounter()) return false; if (hasSnapshotCounter()) { if (getSnapshotCounter() != other.getSnapshotCounter()) return false; } if (!getSnapshottableDirList() .equals(other.getSnapshottableDirList())) return false; if (hasNumSnapshots() != other.hasNumSnapshots()) return false; if (hasNumSnapshots()) { if (getNumSnapshots() != other.getNumSnapshots()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSnapshotCounter()) { hash = (37 * hash) + SNAPSHOTCOUNTER_FIELD_NUMBER; hash = (53 * hash) + getSnapshotCounter(); } if (getSnapshottableDirCount() > 0) { hash = (37 * hash) + SNAPSHOTTABLEDIR_FIELD_NUMBER; hash = (53 * hash) + getSnapshottableDirList().hashCode(); } if (hasNumSnapshots()) { hash = (37 * hash) + NUMSNAPSHOTS_FIELD_NUMBER; hash = (53 * hash) + getNumSnapshots(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom( java.nio.ByteBuffer data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { 
return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * This section records information about snapshots
     * NAME: SNAPSHOT
     * 
* * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotSection} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotSection) org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSectionOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); snapshotCounter_ = 0; bitField0_ = (bitField0_ & ~0x00000001); snapshottableDir_ = emptyLongList(); bitField0_ = (bitField0_ & ~0x00000002); numSnapshots_ = 0; bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.snapshotCounter_ = snapshotCounter_; to_bitField0_ |= 0x00000001; } if (((bitField0_ & 0x00000002) != 0)) { snapshottableDir_.makeImmutable(); bitField0_ = (bitField0_ & ~0x00000002); } result.snapshottableDir_ = snapshottableDir_; if (((from_bitField0_ & 0x00000004) != 0)) { result.numSnapshots_ = numSnapshots_; to_bitField0_ |= 0x00000002; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } 
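      // Illustrative sketch of this builder in use (the values and `moreDirIds`
      // are placeholders): snapshottableDir is a packed repeated uint64 holding
      // the inode ids of snapshottable directories.
      //
      //   FsImageProto.SnapshotSection section =
      //       FsImageProto.SnapshotSection.newBuilder()
      //           .setSnapshotCounter(5)
      //           .addSnapshottableDir(16385L)         // append a single id
      //           .addAllSnapshottableDir(moreDirIds)  // or any Iterable of Long
      //           .setNumSnapshots(3)
      //           .build();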
@java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.getDefaultInstance()) return this; if (other.hasSnapshotCounter()) { setSnapshotCounter(other.getSnapshotCounter()); } if (!other.snapshottableDir_.isEmpty()) { if (snapshottableDir_.isEmpty()) { snapshottableDir_ = other.snapshottableDir_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureSnapshottableDirIsMutable(); snapshottableDir_.addAll(other.snapshottableDir_); } onChanged(); } if (other.hasNumSnapshots()) { setNumSnapshots(other.getNumSnapshots()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int snapshotCounter_ ; /** * optional uint32 snapshotCounter = 1; */ public boolean hasSnapshotCounter() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 snapshotCounter = 1; */ public int getSnapshotCounter() { return snapshotCounter_; } /** * optional uint32 snapshotCounter = 1; */ public Builder setSnapshotCounter(int value) { bitField0_ |= 0x00000001; snapshotCounter_ = value; onChanged(); return this; } /** * optional uint32 snapshotCounter = 1; */ public Builder clearSnapshotCounter() { bitField0_ = (bitField0_ & ~0x00000001); snapshotCounter_ = 0; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.Internal.LongList snapshottableDir_ = emptyLongList(); private void ensureSnapshottableDirIsMutable() { if (!((bitField0_ & 0x00000002) != 0)) { snapshottableDir_ = mutableCopy(snapshottableDir_); bitField0_ |= 0x00000002; } } /** * repeated uint64 snapshottableDir = 2 [packed = true]; */ public java.util.List 
getSnapshottableDirList() { return ((bitField0_ & 0x00000002) != 0) ? java.util.Collections.unmodifiableList(snapshottableDir_) : snapshottableDir_; } /** * repeated uint64 snapshottableDir = 2 [packed = true]; */ public int getSnapshottableDirCount() { return snapshottableDir_.size(); } /** * repeated uint64 snapshottableDir = 2 [packed = true]; */ public long getSnapshottableDir(int index) { return snapshottableDir_.getLong(index); } /** * repeated uint64 snapshottableDir = 2 [packed = true]; */ public Builder setSnapshottableDir( int index, long value) { ensureSnapshottableDirIsMutable(); snapshottableDir_.setLong(index, value); onChanged(); return this; } /** * repeated uint64 snapshottableDir = 2 [packed = true]; */ public Builder addSnapshottableDir(long value) { ensureSnapshottableDirIsMutable(); snapshottableDir_.addLong(value); onChanged(); return this; } /** * repeated uint64 snapshottableDir = 2 [packed = true]; */ public Builder addAllSnapshottableDir( java.lang.Iterable values) { ensureSnapshottableDirIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, snapshottableDir_); onChanged(); return this; } /** * repeated uint64 snapshottableDir = 2 [packed = true]; */ public Builder clearSnapshottableDir() { snapshottableDir_ = emptyLongList(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } private int numSnapshots_ ; /** *
       * total number of snapshots
       * 
* * optional uint32 numSnapshots = 3; */ public boolean hasNumSnapshots() { return ((bitField0_ & 0x00000004) != 0); } /** *
       * total number of snapshots
       * 
* * optional uint32 numSnapshots = 3; */ public int getNumSnapshots() { return numSnapshots_; } /** *
       * total number of snapshots
       * 
* * optional uint32 numSnapshots = 3; */ public Builder setNumSnapshots(int value) { bitField0_ |= 0x00000004; numSnapshots_ = value; onChanged(); return this; } /** *
       * total number of snapshots
       * 
* * optional uint32 numSnapshots = 3; */ public Builder clearNumSnapshots() { bitField0_ = (bitField0_ & ~0x00000004); numSnapshots_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotSection) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotSection) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotSection parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new SnapshotSection(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SnapshotDiffSectionOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotDiffSection) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { } /** *
   **
   * This section records information about snapshot diffs
   * NAME: SNAPSHOT_DIFF
   * 
* * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection} */ public static final class SnapshotDiffSection extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotDiffSection) SnapshotDiffSectionOrBuilder { private static final long serialVersionUID = 0L; // Use SnapshotDiffSection.newBuilder() to construct. private SnapshotDiffSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SnapshotDiffSection() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshotDiffSection( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.Builder.class); } public interface CreatedListEntryOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional bytes name = 1; */ boolean hasName(); /** * optional bytes name = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getName(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry} */ public static final class CreatedListEntry extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry) CreatedListEntryOrBuilder { private static final long serialVersionUID = 0L; // Use CreatedListEntry.newBuilder() to construct. 
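    // For example (illustrative sketch; the name is a placeholder):
    //
    //   FsImageProto.SnapshotDiffSection.CreatedListEntry entry =
    //       FsImageProto.SnapshotDiffSection.CreatedListEntry.newBuilder()
    //           .setName(org.apache.hadoop.thirdparty.protobuf.ByteString
    //               .copyFromUtf8("renamed-file"))
    //           .build();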
private CreatedListEntry(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private CreatedListEntry() { name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CreatedListEntry( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { bitField0_ |= 0x00000001; name_ = input.readBytes(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.Builder.class); } private int bitField0_; public static final int NAME_FIELD_NUMBER = 1; private org.apache.hadoop.thirdparty.protobuf.ByteString name_; /** * optional bytes name = 1; */ public boolean hasName() { return ((bitField0_ & 0x00000001) != 0); } /** * optional bytes name = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getName() { return name_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeBytes(1, name_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(1, name_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) 
{ return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry) obj; if (hasName() != other.hasName()) return false; if (hasName()) { if (!getName() .equals(other.getName())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseDelimitedFrom(java.io.InputStream input) throws 
java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry) org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntryOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
.alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.name_ = name_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.getDefaultInstance()) return this; if (other.hasName()) { setName(other.getName()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes name = 1; */ public boolean hasName() { return ((bitField0_ & 0x00000001) != 0); } /** * optional bytes name = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getName() { return name_; } /** * optional bytes name = 1; */ public Builder setName(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; name_ = value; onChanged(); return this; } /** * optional bytes name = 1; */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000001); name_ = getDefaultInstance().getName(); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public CreatedListEntry parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new CreatedListEntry(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DirectoryDiffOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint32 snapshotId = 1; */ boolean hasSnapshotId(); /** * optional uint32 
snapshotId = 1; */ int getSnapshotId(); /** * optional uint32 childrenSize = 2; */ boolean hasChildrenSize(); /** * optional uint32 childrenSize = 2; */ int getChildrenSize(); /** * optional bool isSnapshotRoot = 3; */ boolean hasIsSnapshotRoot(); /** * optional bool isSnapshotRoot = 3; */ boolean getIsSnapshotRoot(); /** * optional bytes name = 4; */ boolean hasName(); /** * optional bytes name = 4; */ org.apache.hadoop.thirdparty.protobuf.ByteString getName(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ boolean hasSnapshotCopy(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getSnapshotCopy(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getSnapshotCopyOrBuilder(); /** * optional uint32 createdListSize = 6; */ boolean hasCreatedListSize(); /** * optional uint32 createdListSize = 6; */ int getCreatedListSize(); /** *
        * ids of deleted inodes
       * 
* * repeated uint64 deletedINode = 7 [packed = true]; */ java.util.List getDeletedINodeList(); /** *
        * ids of deleted inodes
       * 
* * repeated uint64 deletedINode = 7 [packed = true]; */ int getDeletedINodeCount(); /** *
        * ids of deleted inodes
       * 
* * repeated uint64 deletedINode = 7 [packed = true]; */ long getDeletedINode(int index); /** *
        * ids of reference nodes in the deleted list
       * 
* * repeated uint32 deletedINodeRef = 8 [packed = true]; */ java.util.List getDeletedINodeRefList(); /** *
        * ids of reference nodes in the deleted list
       * 
* * repeated uint32 deletedINodeRef = 8 [packed = true]; */ int getDeletedINodeRefCount(); /** *
        * ids of reference nodes in the deleted list
       * 
* * repeated uint32 deletedINodeRef = 8 [packed = true]; */ int getDeletedINodeRef(int index); } /** * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff} */ public static final class DirectoryDiff extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff) DirectoryDiffOrBuilder { private static final long serialVersionUID = 0L; // Use DirectoryDiff.newBuilder() to construct. private DirectoryDiff(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DirectoryDiff() { name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; deletedINode_ = emptyLongList(); deletedINodeRef_ = emptyIntList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DirectoryDiff( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; snapshotId_ = input.readUInt32(); break; } case 16: { bitField0_ |= 0x00000002; childrenSize_ = input.readUInt32(); break; } case 24: { bitField0_ |= 0x00000004; isSnapshotRoot_ = input.readBool(); break; } case 34: { bitField0_ |= 0x00000008; name_ = input.readBytes(); break; } case 42: { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder subBuilder = null; if (((bitField0_ & 0x00000010) != 0)) { subBuilder = snapshotCopy_.toBuilder(); } snapshotCopy_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(snapshotCopy_); snapshotCopy_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000010; break; } case 48: { bitField0_ |= 0x00000020; createdListSize_ = input.readUInt32(); break; } case 56: { if (!((mutable_bitField0_ & 0x00000040) != 0)) { deletedINode_ = newLongList(); mutable_bitField0_ |= 0x00000040; } deletedINode_.addLong(input.readUInt64()); break; } case 58: { int length = input.readRawVarint32(); int limit = input.pushLimit(length); if (!((mutable_bitField0_ & 0x00000040) != 0) && input.getBytesUntilLimit() > 0) { deletedINode_ = newLongList(); mutable_bitField0_ |= 0x00000040; } while (input.getBytesUntilLimit() > 0) { deletedINode_.addLong(input.readUInt64()); } input.popLimit(limit); break; } case 64: { if (!((mutable_bitField0_ & 0x00000080) != 0)) { deletedINodeRef_ = newIntList(); mutable_bitField0_ |= 0x00000080; } deletedINodeRef_.addInt(input.readUInt32()); break; } case 66: { int length = input.readRawVarint32(); int limit = input.pushLimit(length); if (!((mutable_bitField0_ & 0x00000080) != 0) && input.getBytesUntilLimit() > 0) { deletedINodeRef_ = newIntList(); mutable_bitField0_ |= 0x00000080; } while (input.getBytesUntilLimit() > 0) { deletedINodeRef_.addInt(input.readUInt32()); } input.popLimit(limit); break; } default: { if (!parseUnknownField( input, 
unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000040) != 0)) { deletedINode_.makeImmutable(); // C } if (((mutable_bitField0_ & 0x00000080) != 0)) { deletedINodeRef_.makeImmutable(); // C } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.Builder.class); } private int bitField0_; public static final int SNAPSHOTID_FIELD_NUMBER = 1; private int snapshotId_; /** * optional uint32 snapshotId = 1; */ public boolean hasSnapshotId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 snapshotId = 1; */ public int getSnapshotId() { return snapshotId_; } public static final int CHILDRENSIZE_FIELD_NUMBER = 2; private int childrenSize_; /** * optional uint32 childrenSize = 2; */ public boolean hasChildrenSize() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint32 childrenSize = 2; */ public int getChildrenSize() { return childrenSize_; } public static final int ISSNAPSHOTROOT_FIELD_NUMBER = 3; private boolean isSnapshotRoot_; /** * optional bool isSnapshotRoot = 3; */ public boolean hasIsSnapshotRoot() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bool isSnapshotRoot = 3; */ public boolean getIsSnapshotRoot() { return isSnapshotRoot_; } public static final int NAME_FIELD_NUMBER = 4; private org.apache.hadoop.thirdparty.protobuf.ByteString name_; /** * optional bytes name = 4; */ public boolean hasName() { return ((bitField0_ & 0x00000008) != 0); } /** * optional bytes name = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getName() { return name_; } public static final int SNAPSHOTCOPY_FIELD_NUMBER = 5; private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory snapshotCopy_; /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ public boolean hasSnapshotCopy() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getSnapshotCopy() { return snapshotCopy_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : snapshotCopy_; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getSnapshotCopyOrBuilder() { return snapshotCopy_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : snapshotCopy_; } public static final int CREATEDLISTSIZE_FIELD_NUMBER = 6; private int createdListSize_; /** * optional uint32 createdListSize = 6; */ public boolean hasCreatedListSize() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint32 createdListSize = 6; */ public int getCreatedListSize() { return createdListSize_; } public static final int DELETEDINODE_FIELD_NUMBER = 7; private org.apache.hadoop.thirdparty.protobuf.Internal.LongList deletedINode_; /** *
        * id of deleted inodes
        *
        * repeated uint64 deletedINode = 7 [packed = true]; */ public java.util.List<java.lang.Long> getDeletedINodeList() { return deletedINode_; } /** *
        * id of deleted inodes
        *
        * repeated uint64 deletedINode = 7 [packed = true]; */ public int getDeletedINodeCount() { return deletedINode_.size(); } /** *
        * id of deleted inodes
        *
        * repeated uint64 deletedINode = 7 [packed = true]; */ public long getDeletedINode(int index) { return deletedINode_.getLong(index); } private int deletedINodeMemoizedSerializedSize = -1; public static final int DELETEDINODEREF_FIELD_NUMBER = 8; private org.apache.hadoop.thirdparty.protobuf.Internal.IntList deletedINodeRef_;
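  // Illustrative sketch (not part of the generated source): reading the packed
  // deletedINode field through the accessors above; "diff" is assumed to be an
  // already-parsed DirectoryDiff instance.
  //
  //   for (int i = 0; i < diff.getDeletedINodeCount(); i++) {
  //     long inodeId = diff.getDeletedINode(i);                        // indexed access
  //   }
  //   java.util.List<java.lang.Long> ids = diff.getDeletedINodeList(); // list view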
  /** *
        * id of reference nodes in the deleted list
        *
        * repeated uint32 deletedINodeRef = 8 [packed = true]; */ public java.util.List<java.lang.Integer> getDeletedINodeRefList() { return deletedINodeRef_; } /** *
        * id of reference nodes in the deleted list
        *
        * repeated uint32 deletedINodeRef = 8 [packed = true]; */ public int getDeletedINodeRefCount() { return deletedINodeRef_.size(); } /** *
        * id of reference nodes in the deleted list
        *
* * repeated uint32 deletedINodeRef = 8 [packed = true]; */ public int getDeletedINodeRef(int index) { return deletedINodeRef_.getInt(index); } private int deletedINodeRefMemoizedSerializedSize = -1; private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (hasSnapshotCopy()) { if (!getSnapshotCopy().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, snapshotId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, childrenSize_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBool(3, isSnapshotRoot_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeBytes(4, name_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeMessage(5, getSnapshotCopy()); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt32(6, createdListSize_); } if (getDeletedINodeList().size() > 0) { output.writeUInt32NoTag(58); output.writeUInt32NoTag(deletedINodeMemoizedSerializedSize); } for (int i = 0; i < deletedINode_.size(); i++) { output.writeUInt64NoTag(deletedINode_.getLong(i)); } if (getDeletedINodeRefList().size() > 0) { output.writeUInt32NoTag(66); output.writeUInt32NoTag(deletedINodeRefMemoizedSerializedSize); } for (int i = 0; i < deletedINodeRef_.size(); i++) { output.writeUInt32NoTag(deletedINodeRef_.getInt(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, snapshotId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, childrenSize_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(3, isSnapshotRoot_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(4, name_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(5, getSnapshotCopy()); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(6, createdListSize_); } { int dataSize = 0; for (int i = 0; i < deletedINode_.size(); i++) { dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64SizeNoTag(deletedINode_.getLong(i)); } size += dataSize; if (!getDeletedINodeList().isEmpty()) { size += 1; size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt32SizeNoTag(dataSize); } deletedINodeMemoizedSerializedSize = dataSize; } { int dataSize = 0; for (int i = 0; i < deletedINodeRef_.size(); i++) { dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32SizeNoTag(deletedINodeRef_.getInt(i)); } size += dataSize; if (!getDeletedINodeRefList().isEmpty()) { size += 1; size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt32SizeNoTag(dataSize); } deletedINodeRefMemoizedSerializedSize = dataSize; } size += 
unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff) obj; if (hasSnapshotId() != other.hasSnapshotId()) return false; if (hasSnapshotId()) { if (getSnapshotId() != other.getSnapshotId()) return false; } if (hasChildrenSize() != other.hasChildrenSize()) return false; if (hasChildrenSize()) { if (getChildrenSize() != other.getChildrenSize()) return false; } if (hasIsSnapshotRoot() != other.hasIsSnapshotRoot()) return false; if (hasIsSnapshotRoot()) { if (getIsSnapshotRoot() != other.getIsSnapshotRoot()) return false; } if (hasName() != other.hasName()) return false; if (hasName()) { if (!getName() .equals(other.getName())) return false; } if (hasSnapshotCopy() != other.hasSnapshotCopy()) return false; if (hasSnapshotCopy()) { if (!getSnapshotCopy() .equals(other.getSnapshotCopy())) return false; } if (hasCreatedListSize() != other.hasCreatedListSize()) return false; if (hasCreatedListSize()) { if (getCreatedListSize() != other.getCreatedListSize()) return false; } if (!getDeletedINodeList() .equals(other.getDeletedINodeList())) return false; if (!getDeletedINodeRefList() .equals(other.getDeletedINodeRefList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSnapshotId()) { hash = (37 * hash) + SNAPSHOTID_FIELD_NUMBER; hash = (53 * hash) + getSnapshotId(); } if (hasChildrenSize()) { hash = (37 * hash) + CHILDRENSIZE_FIELD_NUMBER; hash = (53 * hash) + getChildrenSize(); } if (hasIsSnapshotRoot()) { hash = (37 * hash) + ISSNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getIsSnapshotRoot()); } if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } if (hasSnapshotCopy()) { hash = (37 * hash) + SNAPSHOTCOPY_FIELD_NUMBER; hash = (53 * hash) + getSnapshotCopy().hashCode(); } if (hasCreatedListSize()) { hash = (37 * hash) + CREATEDLISTSIZE_FIELD_NUMBER; hash = (53 * hash) + getCreatedListSize(); } if (getDeletedINodeCount() > 0) { hash = (37 * hash) + DELETEDINODE_FIELD_NUMBER; hash = (53 * hash) + getDeletedINodeList().hashCode(); } if (getDeletedINodeRefCount() > 0) { hash = (37 * hash) + DELETEDINODEREF_FIELD_NUMBER; hash = (53 * hash) + getDeletedINodeRefList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } 
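  // Illustrative round-trip sketch (not part of the generated source), assuming
  // only this file and the shaded protobuf runtime; the field values are made up.
  //
  //   FsImageProto.SnapshotDiffSection.DirectoryDiff diff =
  //       FsImageProto.SnapshotDiffSection.DirectoryDiff.newBuilder()
  //           .setSnapshotId(3)
  //           .setChildrenSize(2)
  //           .addDeletedINode(16385L)
  //           .addDeletedINode(16386L)
  //           .addDeletedINodeRef(7)
  //           .build();
  //   // toByteArray() emits deletedINode/deletedINodeRef as packed fields (one
  //   // length-delimited run of varints each, as writeTo() above shows);
  //   // parseFrom() reads them back.
  //   byte[] bytes = diff.toByteArray();
  //   FsImageProto.SnapshotDiffSection.DirectoryDiff parsed =
  //       FsImageProto.SnapshotDiffSection.DirectoryDiff.parseFrom(bytes);
  //   assert parsed.getDeletedINodeCount() == 2 && parsed.getDeletedINodeRef(0) == 7;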
public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff prototype) { return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff) org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiffOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getSnapshotCopyFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); snapshotId_ = 0; bitField0_ = (bitField0_ & ~0x00000001); childrenSize_ = 0; bitField0_ = (bitField0_ & ~0x00000002); isSnapshotRoot_ = false; bitField0_ = (bitField0_ & ~0x00000004); name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000008); if (snapshotCopyBuilder_ == null) { snapshotCopy_ = null; } else { snapshotCopyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); createdListSize_ = 0; bitField0_ = (bitField0_ & ~0x00000020); deletedINode_ = emptyLongList(); bitField0_ = (bitField0_ & ~0x00000040); deletedINodeRef_ = emptyIntList(); bitField0_ = (bitField0_ & ~0x00000080); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff result = buildPartial(); if (!result.isInitialized()) { throw 
newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.snapshotId_ = snapshotId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.childrenSize_ = childrenSize_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.isSnapshotRoot_ = isSnapshotRoot_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { to_bitField0_ |= 0x00000008; } result.name_ = name_; if (((from_bitField0_ & 0x00000010) != 0)) { if (snapshotCopyBuilder_ == null) { result.snapshotCopy_ = snapshotCopy_; } else { result.snapshotCopy_ = snapshotCopyBuilder_.build(); } to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.createdListSize_ = createdListSize_; to_bitField0_ |= 0x00000020; } if (((bitField0_ & 0x00000040) != 0)) { deletedINode_.makeImmutable(); bitField0_ = (bitField0_ & ~0x00000040); } result.deletedINode_ = deletedINode_; if (((bitField0_ & 0x00000080) != 0)) { deletedINodeRef_.makeImmutable(); bitField0_ = (bitField0_ & ~0x00000080); } result.deletedINodeRef_ = deletedINodeRef_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.getDefaultInstance()) return this; if (other.hasSnapshotId()) { setSnapshotId(other.getSnapshotId()); } if (other.hasChildrenSize()) { setChildrenSize(other.getChildrenSize()); } if (other.hasIsSnapshotRoot()) { setIsSnapshotRoot(other.getIsSnapshotRoot()); } if (other.hasName()) { setName(other.getName()); } if (other.hasSnapshotCopy()) { mergeSnapshotCopy(other.getSnapshotCopy()); } if 
(other.hasCreatedListSize()) { setCreatedListSize(other.getCreatedListSize()); } if (!other.deletedINode_.isEmpty()) { if (deletedINode_.isEmpty()) { deletedINode_ = other.deletedINode_; bitField0_ = (bitField0_ & ~0x00000040); } else { ensureDeletedINodeIsMutable(); deletedINode_.addAll(other.deletedINode_); } onChanged(); } if (!other.deletedINodeRef_.isEmpty()) { if (deletedINodeRef_.isEmpty()) { deletedINodeRef_ = other.deletedINodeRef_; bitField0_ = (bitField0_ & ~0x00000080); } else { ensureDeletedINodeRefIsMutable(); deletedINodeRef_.addAll(other.deletedINodeRef_); } onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (hasSnapshotCopy()) { if (!getSnapshotCopy().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int snapshotId_ ; /** * optional uint32 snapshotId = 1; */ public boolean hasSnapshotId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 snapshotId = 1; */ public int getSnapshotId() { return snapshotId_; } /** * optional uint32 snapshotId = 1; */ public Builder setSnapshotId(int value) { bitField0_ |= 0x00000001; snapshotId_ = value; onChanged(); return this; } /** * optional uint32 snapshotId = 1; */ public Builder clearSnapshotId() { bitField0_ = (bitField0_ & ~0x00000001); snapshotId_ = 0; onChanged(); return this; } private int childrenSize_ ; /** * optional uint32 childrenSize = 2; */ public boolean hasChildrenSize() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint32 childrenSize = 2; */ public int getChildrenSize() { return childrenSize_; } /** * optional uint32 childrenSize = 2; */ public Builder setChildrenSize(int value) { bitField0_ |= 0x00000002; childrenSize_ = value; onChanged(); return this; } /** * optional uint32 childrenSize = 2; */ public Builder clearChildrenSize() { bitField0_ = (bitField0_ & ~0x00000002); childrenSize_ = 0; onChanged(); return this; } private boolean isSnapshotRoot_ ; /** * optional bool isSnapshotRoot = 3; */ public boolean hasIsSnapshotRoot() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bool isSnapshotRoot = 3; */ public boolean getIsSnapshotRoot() { return isSnapshotRoot_; } /** * optional bool isSnapshotRoot = 3; */ public Builder setIsSnapshotRoot(boolean value) { bitField0_ |= 0x00000004; isSnapshotRoot_ = value; onChanged(); return this; } /** * optional bool isSnapshotRoot = 3; */ public Builder clearIsSnapshotRoot() { bitField0_ = (bitField0_ & ~0x00000004); isSnapshotRoot_ = false; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes name = 4; */ public boolean hasName() { return ((bitField0_ & 0x00000008) != 0); } /** * 
optional bytes name = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getName() { return name_; } /** * optional bytes name = 4; */ public Builder setName(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; name_ = value; onChanged(); return this; } /** * optional bytes name = 4; */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000008); name_ = getDefaultInstance().getName(); onChanged(); return this; } private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory snapshotCopy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder> snapshotCopyBuilder_; /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ public boolean hasSnapshotCopy() { return ((bitField0_ & 0x00000010) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getSnapshotCopy() { if (snapshotCopyBuilder_ == null) { return snapshotCopy_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : snapshotCopy_; } else { return snapshotCopyBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ public Builder setSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory value) { if (snapshotCopyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } snapshotCopy_ = value; onChanged(); } else { snapshotCopyBuilder_.setMessage(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ public Builder setSnapshotCopy( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder builderForValue) { if (snapshotCopyBuilder_ == null) { snapshotCopy_ = builderForValue.build(); onChanged(); } else { snapshotCopyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ public Builder mergeSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory value) { if (snapshotCopyBuilder_ == null) { if (((bitField0_ & 0x00000010) != 0) && snapshotCopy_ != null && snapshotCopy_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance()) { snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.newBuilder(snapshotCopy_).mergeFrom(value).buildPartial(); } else { snapshotCopy_ = value; } onChanged(); } else { snapshotCopyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ public Builder clearSnapshotCopy() { if (snapshotCopyBuilder_ == null) { snapshotCopy_ = null; onChanged(); } else { snapshotCopyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ public 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder getSnapshotCopyBuilder() { bitField0_ |= 0x00000010; onChanged(); return getSnapshotCopyFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getSnapshotCopyOrBuilder() { if (snapshotCopyBuilder_ != null) { return snapshotCopyBuilder_.getMessageOrBuilder(); } else { return snapshotCopy_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : snapshotCopy_; } } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder> getSnapshotCopyFieldBuilder() { if (snapshotCopyBuilder_ == null) { snapshotCopyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder>( getSnapshotCopy(), getParentForChildren(), isClean()); snapshotCopy_ = null; } return snapshotCopyBuilder_; } private int createdListSize_ ; /** * optional uint32 createdListSize = 6; */ public boolean hasCreatedListSize() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint32 createdListSize = 6; */ public int getCreatedListSize() { return createdListSize_; } /** * optional uint32 createdListSize = 6; */ public Builder setCreatedListSize(int value) { bitField0_ |= 0x00000020; createdListSize_ = value; onChanged(); return this; } /** * optional uint32 createdListSize = 6; */ public Builder clearCreatedListSize() { bitField0_ = (bitField0_ & ~0x00000020); createdListSize_ = 0; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.Internal.LongList deletedINode_ = emptyLongList(); private void ensureDeletedINodeIsMutable() { if (!((bitField0_ & 0x00000040) != 0)) { deletedINode_ = mutableCopy(deletedINode_); bitField0_ |= 0x00000040; } } /** *
          * id of deleted inodes
          *
          * repeated uint64 deletedINode = 7 [packed = true]; */ public java.util.List<java.lang.Long> getDeletedINodeList() { return ((bitField0_ & 0x00000040) != 0) ? java.util.Collections.unmodifiableList(deletedINode_) : deletedINode_; } /** *
          * id of deleted inodes
          *
          * repeated uint64 deletedINode = 7 [packed = true]; */ public int getDeletedINodeCount() { return deletedINode_.size(); } /** *
          * id of deleted inodes
          *
          * repeated uint64 deletedINode = 7 [packed = true]; */ public long getDeletedINode(int index) { return deletedINode_.getLong(index); } /** *
          * id of deleted inodes
          *
          * repeated uint64 deletedINode = 7 [packed = true]; */ public Builder setDeletedINode( int index, long value) { ensureDeletedINodeIsMutable(); deletedINode_.setLong(index, value); onChanged(); return this; } /** *
          * id of deleted inodes
          *
          * repeated uint64 deletedINode = 7 [packed = true]; */ public Builder addDeletedINode(long value) { ensureDeletedINodeIsMutable(); deletedINode_.addLong(value); onChanged(); return this; } /** *
          * id of deleted inodes
          *
          * repeated uint64 deletedINode = 7 [packed = true]; */ public Builder addAllDeletedINode( java.lang.Iterable<? extends java.lang.Long> values) { ensureDeletedINodeIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, deletedINode_); onChanged(); return this; } /** *
          * id of deleted inodes
          *
          * repeated uint64 deletedINode = 7 [packed = true]; */ public Builder clearDeletedINode() { deletedINode_ = emptyLongList(); bitField0_ = (bitField0_ & ~0x00000040); onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.Internal.IntList deletedINodeRef_ = emptyIntList(); private void ensureDeletedINodeRefIsMutable() { if (!((bitField0_ & 0x00000080) != 0)) { deletedINodeRef_ = mutableCopy(deletedINodeRef_); bitField0_ |= 0x00000080; } }
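  // Illustrative sketch (not part of the generated source): the repeated-field
  // mutators above are copy-on-write -- ensureDeletedINodeIsMutable() copies the
  // backing list on first mutation, so a list shared with a built message is
  // never modified in place.
  //
  //   FsImageProto.SnapshotDiffSection.DirectoryDiff.Builder b =
  //       FsImageProto.SnapshotDiffSection.DirectoryDiff.newBuilder();
  //   b.addAllDeletedINode(java.util.Arrays.asList(16385L, 16386L));
  //   b.clearDeletedINode();            // back to emptyLongList()
  //   b.addDeletedINode(16387L);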
  /** *
          * id of reference nodes in the deleted list
          *
          * repeated uint32 deletedINodeRef = 8 [packed = true]; */ public java.util.List<java.lang.Integer> getDeletedINodeRefList() { return ((bitField0_ & 0x00000080) != 0) ? java.util.Collections.unmodifiableList(deletedINodeRef_) : deletedINodeRef_; } /** *
          * id of reference nodes in the deleted list
          *
          * repeated uint32 deletedINodeRef = 8 [packed = true]; */ public int getDeletedINodeRefCount() { return deletedINodeRef_.size(); } /** *
          * id of reference nodes in the deleted list
          *
          * repeated uint32 deletedINodeRef = 8 [packed = true]; */ public int getDeletedINodeRef(int index) { return deletedINodeRef_.getInt(index); } /** *
          * id of reference nodes in the deleted list
          *
          * repeated uint32 deletedINodeRef = 8 [packed = true]; */ public Builder setDeletedINodeRef( int index, int value) { ensureDeletedINodeRefIsMutable(); deletedINodeRef_.setInt(index, value); onChanged(); return this; } /** *
          * id of reference nodes in the deleted list
          *
          * repeated uint32 deletedINodeRef = 8 [packed = true]; */ public Builder addDeletedINodeRef(int value) { ensureDeletedINodeRefIsMutable(); deletedINodeRef_.addInt(value); onChanged(); return this; } /** *
          * id of reference nodes in the deleted list
          *
          * repeated uint32 deletedINodeRef = 8 [packed = true]; */ public Builder addAllDeletedINodeRef( java.lang.Iterable<? extends java.lang.Integer> values) { ensureDeletedINodeRefIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, deletedINodeRef_); onChanged(); return this; } /** *
          * id of reference nodes in the deleted list
          *
* * repeated uint32 deletedINodeRef = 8 [packed = true]; */ public Builder clearDeletedINodeRef() { deletedINodeRef_ = emptyIntList(); bitField0_ = (bitField0_ & ~0x00000080); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DirectoryDiff parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new DirectoryDiff(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface FileDiffOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint32 snapshotId = 1; */ boolean hasSnapshotId(); /** * optional uint32 snapshotId = 1; */ int getSnapshotId(); /** * optional uint64 fileSize = 2; */ boolean hasFileSize(); /** * optional uint64 fileSize = 2; */ long getFileSize(); /** * optional bytes name = 3; */ boolean hasName(); /** * optional bytes name = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getName(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ boolean hasSnapshotCopy(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getSnapshotCopy(); /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getSnapshotCopyOrBuilder(); /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ java.util.List getBlocksList(); /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index); /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ int getBlocksCount(); /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ java.util.List getBlocksOrBuilderList(); /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff} */ public static final class FileDiff extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff) FileDiffOrBuilder { private static final long serialVersionUID = 0L; // Use FileDiff.newBuilder() to construct. private FileDiff(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private FileDiff() { name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; blocks_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FileDiff( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; snapshotId_ = input.readUInt32(); break; } case 16: { bitField0_ |= 0x00000002; fileSize_ = input.readUInt64(); break; } case 26: { bitField0_ |= 0x00000004; name_ = input.readBytes(); break; } case 34: { org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder subBuilder = null; if (((bitField0_ & 0x00000008) != 0)) { subBuilder = snapshotCopy_.toBuilder(); } snapshotCopy_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(snapshotCopy_); snapshotCopy_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } case 42: { if (!((mutable_bitField0_ & 0x00000010) != 0)) { blocks_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000010; } blocks_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry)); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000010) != 0)) { blocks_ = java.util.Collections.unmodifiableList(blocks_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_fieldAccessorTable 
.ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.Builder.class); } private int bitField0_; public static final int SNAPSHOTID_FIELD_NUMBER = 1; private int snapshotId_; /** * optional uint32 snapshotId = 1; */ public boolean hasSnapshotId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 snapshotId = 1; */ public int getSnapshotId() { return snapshotId_; } public static final int FILESIZE_FIELD_NUMBER = 2; private long fileSize_; /** * optional uint64 fileSize = 2; */ public boolean hasFileSize() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 fileSize = 2; */ public long getFileSize() { return fileSize_; } public static final int NAME_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString name_; /** * optional bytes name = 3; */ public boolean hasName() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bytes name = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getName() { return name_; } public static final int SNAPSHOTCOPY_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile snapshotCopy_; /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ public boolean hasSnapshotCopy() { return ((bitField0_ & 0x00000008) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getSnapshotCopy() { return snapshotCopy_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : snapshotCopy_; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getSnapshotCopyOrBuilder() { return snapshotCopy_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : snapshotCopy_; } public static final int BLOCKS_FIELD_NUMBER = 5; private java.util.List blocks_; /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public java.util.List getBlocksList() { return blocks_; } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public java.util.List getBlocksOrBuilderList() { return blocks_; } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public int getBlocksCount() { return blocks_.size(); } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) { return blocks_.get(index); } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder( int index) { return blocks_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (hasSnapshotCopy()) { if (!getSnapshotCopy().isInitialized()) { memoizedIsInitialized = 0; return false; } } for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, snapshotId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, fileSize_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBytes(3, name_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeMessage(4, getSnapshotCopy()); } for (int i = 0; i < blocks_.size(); i++) { output.writeMessage(5, blocks_.get(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, snapshotId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, fileSize_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, name_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(4, getSnapshotCopy()); } for (int i = 0; i < blocks_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(5, blocks_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff) obj; if (hasSnapshotId() != other.hasSnapshotId()) return false; if (hasSnapshotId()) { if (getSnapshotId() != other.getSnapshotId()) return false; } if (hasFileSize() != other.hasFileSize()) return false; if (hasFileSize()) { if (getFileSize() != 
other.getFileSize()) return false; } if (hasName() != other.hasName()) return false; if (hasName()) { if (!getName() .equals(other.getName())) return false; } if (hasSnapshotCopy() != other.hasSnapshotCopy()) return false; if (hasSnapshotCopy()) { if (!getSnapshotCopy() .equals(other.getSnapshotCopy())) return false; } if (!getBlocksList() .equals(other.getBlocksList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSnapshotId()) { hash = (37 * hash) + SNAPSHOTID_FIELD_NUMBER; hash = (53 * hash) + getSnapshotId(); } if (hasFileSize()) { hash = (37 * hash) + FILESIZE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFileSize()); } if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } if (hasSnapshotCopy()) { hash = (37 * hash) + SNAPSHOTCOPY_FIELD_NUMBER; hash = (53 * hash) + getSnapshotCopy().hashCode(); } if (getBlocksCount() > 0) { hash = (37 * hash) + BLOCKS_FIELD_NUMBER; hash = (53 * hash) + getBlocksList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom( java.io.InputStream input, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff) org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiffOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getSnapshotCopyFieldBuilder(); getBlocksFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); snapshotId_ = 0; bitField0_ = (bitField0_ & ~0x00000001); fileSize_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); if (snapshotCopyBuilder_ == null) { snapshotCopy_ = null; } else { snapshotCopyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); if (blocksBuilder_ == null) { blocks_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000010); } else { blocksBuilder_.clear(); } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff result = new 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.snapshotId_ = snapshotId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.fileSize_ = fileSize_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.name_ = name_; if (((from_bitField0_ & 0x00000008) != 0)) { if (snapshotCopyBuilder_ == null) { result.snapshotCopy_ = snapshotCopy_; } else { result.snapshotCopy_ = snapshotCopyBuilder_.build(); } to_bitField0_ |= 0x00000008; } if (blocksBuilder_ == null) { if (((bitField0_ & 0x00000010) != 0)) { blocks_ = java.util.Collections.unmodifiableList(blocks_); bitField0_ = (bitField0_ & ~0x00000010); } result.blocks_ = blocks_; } else { result.blocks_ = blocksBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.getDefaultInstance()) return this; if (other.hasSnapshotId()) { setSnapshotId(other.getSnapshotId()); } if (other.hasFileSize()) { setFileSize(other.getFileSize()); } if (other.hasName()) { setName(other.getName()); } if (other.hasSnapshotCopy()) { mergeSnapshotCopy(other.getSnapshotCopy()); } if (blocksBuilder_ == null) { if (!other.blocks_.isEmpty()) { if (blocks_.isEmpty()) { blocks_ = other.blocks_; bitField0_ = (bitField0_ & ~0x00000010); } else { ensureBlocksIsMutable(); blocks_.addAll(other.blocks_); } onChanged(); } } else { if (!other.blocks_.isEmpty()) { if (blocksBuilder_.isEmpty()) { blocksBuilder_.dispose(); blocksBuilder_ = null; blocks_ = other.blocks_; bitField0_ = (bitField0_ & ~0x00000010); blocksBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getBlocksFieldBuilder() : null; } else { blocksBuilder_.addAllMessages(other.blocks_); } } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (hasSnapshotCopy()) { if (!getSnapshotCopy().isInitialized()) { return false; } } for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int snapshotId_ ; /** * optional uint32 snapshotId = 1; */ public boolean hasSnapshotId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 snapshotId = 1; */ public int getSnapshotId() { return snapshotId_; } /** * optional uint32 snapshotId = 1; */ public Builder setSnapshotId(int value) { bitField0_ |= 0x00000001; snapshotId_ = value; onChanged(); return this; } /** * optional uint32 snapshotId = 1; */ public Builder clearSnapshotId() { bitField0_ = (bitField0_ & ~0x00000001); snapshotId_ = 0; onChanged(); return this; } private long fileSize_ ; /** * optional uint64 fileSize = 2; */ public boolean hasFileSize() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 fileSize = 2; */ public long getFileSize() { return fileSize_; } /** * optional uint64 fileSize = 2; */ public Builder setFileSize(long value) { bitField0_ |= 0x00000002; fileSize_ = value; onChanged(); return this; } /** * optional uint64 fileSize = 2; */ public Builder clearFileSize() { bitField0_ = (bitField0_ & ~0x00000002); fileSize_ = 0L; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes name = 3; */ public boolean hasName() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bytes name = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getName() { return name_; } /** * optional bytes name = 3; */ public Builder setName(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; name_ = value; onChanged(); return this; } /** * optional bytes name = 3; */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000004); name_ = getDefaultInstance().getName(); onChanged(); return this; } private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile snapshotCopy_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder> snapshotCopyBuilder_; /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ public boolean 
hasSnapshotCopy() { return ((bitField0_ & 0x00000008) != 0); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getSnapshotCopy() { if (snapshotCopyBuilder_ == null) { return snapshotCopy_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : snapshotCopy_; } else { return snapshotCopyBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ public Builder setSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) { if (snapshotCopyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } snapshotCopy_ = value; onChanged(); } else { snapshotCopyBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ public Builder setSnapshotCopy( org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder builderForValue) { if (snapshotCopyBuilder_ == null) { snapshotCopy_ = builderForValue.build(); onChanged(); } else { snapshotCopyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ public Builder mergeSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) { if (snapshotCopyBuilder_ == null) { if (((bitField0_ & 0x00000008) != 0) && snapshotCopy_ != null && snapshotCopy_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) { snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder(snapshotCopy_).mergeFrom(value).buildPartial(); } else { snapshotCopy_ = value; } onChanged(); } else { snapshotCopyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ public Builder clearSnapshotCopy() { if (snapshotCopyBuilder_ == null) { snapshotCopy_ = null; onChanged(); } else { snapshotCopyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder getSnapshotCopyBuilder() { bitField0_ |= 0x00000008; onChanged(); return getSnapshotCopyFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getSnapshotCopyOrBuilder() { if (snapshotCopyBuilder_ != null) { return snapshotCopyBuilder_.getMessageOrBuilder(); } else { return snapshotCopy_ == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : snapshotCopy_; } } /** * optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder> getSnapshotCopyFieldBuilder() { if (snapshotCopyBuilder_ == null) { snapshotCopyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder>( getSnapshotCopy(), getParentForChildren(), isClean()); snapshotCopy_ = null; } return snapshotCopyBuilder_; } private java.util.List blocks_ = java.util.Collections.emptyList(); private void ensureBlocksIsMutable() { if (!((bitField0_ & 0x00000010) != 0)) { blocks_ = new java.util.ArrayList(blocks_); bitField0_ |= 0x00000010; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blocksBuilder_; /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public java.util.List getBlocksList() { if (blocksBuilder_ == null) { return java.util.Collections.unmodifiableList(blocks_); } else { return blocksBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public int getBlocksCount() { if (blocksBuilder_ == null) { return blocks_.size(); } else { return blocksBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) { if (blocksBuilder_ == null) { return blocks_.get(index); } else { return blocksBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public Builder setBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.set(index, value); onChanged(); } else { blocksBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public Builder setBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.set(index, builderForValue.build()); onChanged(); } else { blocksBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.add(value); onChanged(); } else { blocksBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public Builder addBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); 
} ensureBlocksIsMutable(); blocks_.add(index, value); onChanged(); } else { blocksBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public Builder addBlocks( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.add(builderForValue.build()); onChanged(); } else { blocksBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public Builder addBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.add(index, builderForValue.build()); onChanged(); } else { blocksBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public Builder addAllBlocks( java.lang.Iterable values) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, blocks_); onChanged(); } else { blocksBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public Builder clearBlocks() { if (blocksBuilder_ == null) { blocks_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000010); onChanged(); } else { blocksBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public Builder removeBlocks(int index) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.remove(index); onChanged(); } else { blocksBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlocksBuilder( int index) { return getBlocksFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder( int index) { if (blocksBuilder_ == null) { return blocks_.get(index); } else { return blocksBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public java.util.List getBlocksOrBuilderList() { if (blocksBuilder_ != null) { return blocksBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(blocks_); } } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder() { return getBlocksFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder( int index) { return getBlocksFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.BlockProto blocks = 5; */ public java.util.List getBlocksBuilderList() { return getBlocksFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> getBlocksFieldBuilder() { if (blocksBuilder_ == null) { blocksBuilder_ = new 
org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>( blocks_, ((bitField0_ & 0x00000010) != 0), getParentForChildren(), isClean()); blocks_ = null; } return blocksBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public FileDiff parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new FileDiff(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface DiffEntryOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1; */ boolean hasType(); /** * required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1; */ org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type getType(); /** * optional uint64 inodeId = 2; */ boolean hasInodeId(); /** * optional uint64 inodeId = 2; */ long getInodeId(); /** * optional uint32 numOfDiff = 3; */ boolean hasNumOfDiff(); /** * optional uint32 numOfDiff = 3; */ int getNumOfDiff(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry} */ public static final class DiffEntry extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry) DiffEntryOrBuilder { private static final long serialVersionUID = 0L; // Use DiffEntry.newBuilder() to construct. 
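    // Usage sketch (illustrative, not part of the generated API): building the
    // FileDiff defined above, plus the DiffEntry declared next. The field
    // values are assumptions chosen for illustration; every call is one of the
    // generated builder methods in this file.
    //
    //   FsImageProto.SnapshotDiffSection.FileDiff diff =
    //       FsImageProto.SnapshotDiffSection.FileDiff.newBuilder()
    //           .setSnapshotId(1)            // optional uint32 snapshotId = 1
    //           .setFileSize(134217728L)     // optional uint64 fileSize = 2
    //           .setName(org.apache.hadoop.thirdparty.protobuf.ByteString
    //               .copyFromUtf8("part-00000"))
    //           .build();                    // all fields optional, so build() succeeds
    //
    //   FsImageProto.SnapshotDiffSection.DiffEntry entry =
    //       FsImageProto.SnapshotDiffSection.DiffEntry.newBuilder()
    //           .setType(FsImageProto.SnapshotDiffSection.DiffEntry.Type.FILEDIFF)
    //           .setInodeId(16386L)          // assumed inode id
    //           .setNumOfDiff(1)             // count of diff records paired with this entry
    //           .build();                    // throws if the required 'type' field is unset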
private DiffEntry(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DiffEntry() { type_ = 1; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DiffEntry( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int rawValue = input.readEnum(); @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type value = org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; type_ = rawValue; } break; } case 16: { bitField0_ |= 0x00000002; inodeId_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; numOfDiff_ = input.readUInt32(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Builder.class); } /** * Protobuf enum {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type} */ public enum Type implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum { /** * FILEDIFF = 1; */ FILEDIFF(1), /** * DIRECTORYDIFF = 2; */ DIRECTORYDIFF(2), ; /** * FILEDIFF = 1; */ public static final int FILEDIFF_VALUE = 1; /** * DIRECTORYDIFF = 2; */ public static final int DIRECTORYDIFF_VALUE = 2; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. 
*/ @java.lang.Deprecated public static Type valueOf(int value) { return forNumber(value); } public static Type forNumber(int value) { switch (value) { case 1: return FILEDIFF; case 2: return DIRECTORYDIFF; default: return null; } } public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap< Type> internalValueMap = new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap() { public Type findValueByNumber(int number) { return Type.forNumber(number); } }; public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.getDescriptor().getEnumTypes().get(0); } private static final Type[] VALUES = values(); public static Type valueOf( org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private Type(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type) } private int bitField0_; public static final int TYPE_FIELD_NUMBER = 1; private int type_; /** * required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1; */ public boolean hasType() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type getType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type result = org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.valueOf(type_); return result == null ? 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.FILEDIFF : result; } public static final int INODEID_FIELD_NUMBER = 2; private long inodeId_; /** * optional uint64 inodeId = 2; */ public boolean hasInodeId() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 inodeId = 2; */ public long getInodeId() { return inodeId_; } public static final int NUMOFDIFF_FIELD_NUMBER = 3; private int numOfDiff_; /** * optional uint32 numOfDiff = 3; */ public boolean hasNumOfDiff() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint32 numOfDiff = 3; */ public int getNumOfDiff() { return numOfDiff_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasType()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeEnum(1, type_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, inodeId_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt32(3, numOfDiff_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeEnumSize(1, type_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, inodeId_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(3, numOfDiff_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry) obj; if (hasType() != other.hasType()) return false; if (hasType()) { if (type_ != other.type_) return false; } if (hasInodeId() != other.hasInodeId()) return false; if (hasInodeId()) { if (getInodeId() != other.getInodeId()) return false; } if (hasNumOfDiff() != other.hasNumOfDiff()) return false; if (hasNumOfDiff()) { if (getNumOfDiff() != other.getNumOfDiff()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasType()) { hash = (37 * hash) + TYPE_FIELD_NUMBER; hash = (53 * hash) + type_; } if (hasInodeId()) { hash = (37 * hash) + INODEID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getInodeId()); } if (hasNumOfDiff()) { hash = (37 * hash) + NUMOFDIFF_FIELD_NUMBER; hash = (53 * hash) + getNumOfDiff(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry 
parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry) org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntryOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); type_ = 1; bitField0_ = (bitField0_ & ~0x00000001); inodeId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); numOfDiff_ = 0; bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.type_ = type_; if (((from_bitField0_ & 0x00000002) != 0)) { result.inodeId_ = inodeId_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.numOfDiff_ = numOfDiff_; to_bitField0_ |= 0x00000004; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.getDefaultInstance()) return this; if (other.hasType()) { setType(other.getType()); } if (other.hasInodeId()) { setInodeId(other.getInodeId()); } if (other.hasNumOfDiff()) { setNumOfDiff(other.getNumOfDiff()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasType()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int type_ = 1; /** * required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1; */ public boolean hasType() { return 
((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1; */ public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type getType() { @SuppressWarnings("deprecation") org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type result = org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.valueOf(type_); return result == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.FILEDIFF : result; } /** * required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1; */ public Builder setType(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; type_ = value.getNumber(); onChanged(); return this; } /** * required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1; */ public Builder clearType() { bitField0_ = (bitField0_ & ~0x00000001); type_ = 1; onChanged(); return this; } private long inodeId_ ; /** * optional uint64 inodeId = 2; */ public boolean hasInodeId() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 inodeId = 2; */ public long getInodeId() { return inodeId_; } /** * optional uint64 inodeId = 2; */ public Builder setInodeId(long value) { bitField0_ |= 0x00000002; inodeId_ = value; onChanged(); return this; } /** * optional uint64 inodeId = 2; */ public Builder clearInodeId() { bitField0_ = (bitField0_ & ~0x00000002); inodeId_ = 0L; onChanged(); return this; } private int numOfDiff_ ; /** * optional uint32 numOfDiff = 3; */ public boolean hasNumOfDiff() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint32 numOfDiff = 3; */ public int getNumOfDiff() { return numOfDiff_; } /** * optional uint32 numOfDiff = 3; */ public Builder setNumOfDiff(int value) { bitField0_ |= 0x00000004; numOfDiff_ = value; onChanged(); return this; } /** * optional uint32 numOfDiff = 3; */ public Builder clearNumOfDiff() { bitField0_ = (bitField0_ & ~0x00000004); numOfDiff_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public DiffEntry parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new DiffEntry(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection) obj; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, 
extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     * <pre>
     **
     * This section records information about snapshot diffs
     * NAME: SNAPSHOT_DIFF
     * </pre>
* * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotDiffSection) org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSectionOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SnapshotDiffSection parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new SnapshotDiffSection(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface StringTableSectionOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.StringTableSection) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint32 numEntry = 1; */ 
boolean hasNumEntry(); /** * optional uint32 numEntry = 1; */ int getNumEntry(); /** * <pre>
     * repeated Entry
     * </pre>
* * optional uint32 maskBits = 2 [default = 0]; */ boolean hasMaskBits(); /** * <pre>
     * repeated Entry
     * </pre>
* * optional uint32 maskBits = 2 [default = 0]; */ int getMaskBits(); } /** * <pre>
   **
   * This section maps string to id
   * NAME: STRING_TABLE
   * </pre>
* * Protobuf type {@code hadoop.hdfs.fsimage.StringTableSection} */ public static final class StringTableSection extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.StringTableSection) StringTableSectionOrBuilder { private static final long serialVersionUID = 0L; // Use StringTableSection.newBuilder() to construct. private StringTableSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private StringTableSection() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StringTableSection( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; numEntry_ = input.readUInt32(); break; } case 16: { bitField0_ |= 0x00000002; maskBits_ = input.readUInt32(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Builder.class); } public interface EntryOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.StringTableSection.Entry) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint32 id = 1; */ boolean hasId(); /** * optional uint32 id = 1; */ int getId(); /** * optional string str = 2; */ boolean hasStr(); /** * optional string str = 2; */ java.lang.String getStr(); /** * optional string str = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getStrBytes(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.StringTableSection.Entry} */ public static final class Entry extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.StringTableSection.Entry) EntryOrBuilder { private static final long serialVersionUID = 0L; // Use Entry.newBuilder() to construct. 
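    // Usage sketch (illustrative): a string-table Entry maps a numeric id to a
    // string such as a user or group name. The id and string below are assumed
    // values; only generated methods from this file are used.
    //
    //   FsImageProto.StringTableSection.Entry e =
    //       FsImageProto.StringTableSection.Entry.newBuilder()
    //           .setId(7)               // optional uint32 id = 1
    //           .setStr("supergroup")   // optional string str = 2
    //           .build();
    //
    //   In the fsimage file the section header (numEntry, maskBits) is followed
    //   by the entries themselves, so a reader can loop numEntry times calling
    //   Entry.parseDelimitedFrom(in) -- a sketch of the layout, not a contract.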
private Entry(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private Entry() { str_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private Entry( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; id_ = input.readUInt32(); break; } case 18: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; str_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.Builder.class); } private int bitField0_; public static final int ID_FIELD_NUMBER = 1; private int id_; /** * optional uint32 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 id = 1; */ public int getId() { return id_; } public static final int STR_FIELD_NUMBER = 2; private volatile java.lang.Object str_; /** * optional string str = 2; */ public boolean hasStr() { return ((bitField0_ & 0x00000002) != 0); } /** * optional string str = 2; */ public java.lang.String getStr() { java.lang.Object ref = str_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { str_ = s; } return s; } } /** * optional string str = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStrBytes() { java.lang.Object ref = str_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); str_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; 
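    // The methods below follow the standard protobuf-java pattern: writeTo(...)
    // emits only the fields whose presence bit is set in bitField0_, and
    // getSerializedSize() memoizes its result. A hedged sketch of streaming an
    // Entry (writeTo(OutputStream) and writeDelimitedTo(OutputStream) are
    // inherited from the protobuf runtime):
    //
    //   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    //   entry.writeTo(out);               // bare wire form, same bytes as toByteArray()
    //   entry.writeDelimitedTo(out);      // length-prefixed variant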
@java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, id_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, str_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, id_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, str_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry) obj; if (hasId() != other.hasId()) return false; if (hasId()) { if (getId() != other.getId()) return false; } if (hasStr() != other.hasStr()) return false; if (hasStr()) { if (!getStr() .equals(other.getStr())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasId()) { hash = (37 * hash) + ID_FIELD_NUMBER; hash = (53 * hash) + getId(); } if (hasStr()) { hash = (37 * hash) + STR_FIELD_NUMBER; hash = (53 * hash) + getStr().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(byte[] data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.StringTableSection.Entry} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.StringTableSection.Entry) org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.EntryOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); id_ = 0; bitField0_ = (bitField0_ & ~0x00000001); str_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.id_ = id_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.str_ = str_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.getDefaultInstance()) return this; if (other.hasId()) { setId(other.getId()); } if (other.hasStr()) { bitField0_ |= 0x00000002; str_ = other.str_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int id_ ; /** * optional uint32 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 id = 1; */ public int getId() { return id_; } /** * optional uint32 id = 1; */ public Builder setId(int value) { bitField0_ |= 0x00000001; id_ = value; onChanged(); return this; } /** * optional uint32 id = 1; */ public Builder clearId() { bitField0_ = (bitField0_ & ~0x00000001); id_ = 0; onChanged(); return this; } private java.lang.Object str_ = ""; /** * optional string str = 2; */ public boolean hasStr() { return ((bitField0_ & 0x00000002) != 0); } /** * optional string str = 2; */ public java.lang.String getStr() { java.lang.Object ref = str_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { str_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string str = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getStrBytes() { 
java.lang.Object ref = str_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); str_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string str = 2; */ public Builder setStr( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; str_ = value; onChanged(); return this; } /** * optional string str = 2; */ public Builder clearStr() { bitField0_ = (bitField0_ & ~0x00000002); str_ = getDefaultInstance().getStr(); onChanged(); return this; } /** * optional string str = 2; */ public Builder setStrBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; str_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.StringTableSection.Entry) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.StringTableSection.Entry) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public Entry parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new Entry(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private int bitField0_; public static final int NUMENTRY_FIELD_NUMBER = 1; private int numEntry_; /** * optional uint32 numEntry = 1; */ public boolean hasNumEntry() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 numEntry = 1; */ public int getNumEntry() { return numEntry_; } public static final int MASKBITS_FIELD_NUMBER = 2; private int maskBits_; /** *
     * repeated Entry
     * 
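     * (The Entry records are not a field of this message: numEntry
     * length-delimited Entry messages are expected to follow the section
     * header in the image stream, which is why only counts are stored here.)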
* * optional uint32 maskBits = 2 [default = 0]; */ public boolean hasMaskBits() { return ((bitField0_ & 0x00000002) != 0); } /** *
     * repeated Entry
     * 
* * optional uint32 maskBits = 2 [default = 0]; */ public int getMaskBits() { return maskBits_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, numEntry_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, maskBits_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, numEntry_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, maskBits_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection) obj; if (hasNumEntry() != other.hasNumEntry()) return false; if (hasNumEntry()) { if (getNumEntry() != other.getNumEntry()) return false; } if (hasMaskBits() != other.hasMaskBits()) return false; if (hasMaskBits()) { if (getMaskBits() != other.getMaskBits()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasNumEntry()) { hash = (37 * hash) + NUMENTRY_FIELD_NUMBER; hash = (53 * hash) + getNumEntry(); } if (hasMaskBits()) { hash = (37 * hash) + MASKBITS_FIELD_NUMBER; hash = (53 * hash) + getMaskBits(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * This section maps strings to ids
     * NAME: STRING_TABLE
     * 
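     *
     * Builder usage sketch (illustrative only); an existing message can also
     * be rebuilt via toBuilder():
     *
     *   StringTableSection section = StringTableSection.newBuilder()
     *       .setNumEntry(42)
     *       .setMaskBits(0)
     *       .build();
     *   StringTableSection updated = section.toBuilder().setMaskBits(2).build();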
* * Protobuf type {@code hadoop.hdfs.fsimage.StringTableSection} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.StringTableSection) org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSectionOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); numEntry_ = 0; bitField0_ = (bitField0_ & ~0x00000001); maskBits_ = 0; bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.numEntry_ = numEntry_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.maskBits_ = maskBits_; to_bitField0_ |= 0x00000002; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.getDefaultInstance()) return this; if (other.hasNumEntry()) { setNumEntry(other.getNumEntry()); } if (other.hasMaskBits()) { setMaskBits(other.getMaskBits()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int numEntry_ ; /** * optional uint32 numEntry = 1; */ public boolean hasNumEntry() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 numEntry = 1; */ public int getNumEntry() { return numEntry_; } /** * optional uint32 numEntry = 1; */ public Builder setNumEntry(int value) { bitField0_ |= 0x00000001; numEntry_ = value; onChanged(); return this; } /** * optional uint32 numEntry = 1; */ public Builder clearNumEntry() { bitField0_ = (bitField0_ & ~0x00000001); numEntry_ = 0; onChanged(); return this; } private int maskBits_ ; /** *
       * repeated Entry
       * 
* * optional uint32 maskBits = 2 [default = 0]; */ public boolean hasMaskBits() { return ((bitField0_ & 0x00000002) != 0); } /** *
       * repeated Entry
       * 
* * optional uint32 maskBits = 2 [default = 0]; */ public int getMaskBits() { return maskBits_; } /** *
       * repeated Entry
       * 
* * optional uint32 maskBits = 2 [default = 0]; */ public Builder setMaskBits(int value) { bitField0_ |= 0x00000002; maskBits_ = value; onChanged(); return this; } /** *
       * repeated Entry
       * 
* * optional uint32 maskBits = 2 [default = 0]; */ public Builder clearMaskBits() { bitField0_ = (bitField0_ & ~0x00000002); maskBits_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.StringTableSection) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.StringTableSection) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public StringTableSection parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new StringTableSection(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface SecretManagerSectionOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SecretManagerSection) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint32 currentId = 1; */ boolean hasCurrentId(); /** * optional uint32 currentId = 1; */ int getCurrentId(); /** * optional uint32 tokenSequenceNumber = 2; */ boolean hasTokenSequenceNumber(); /** * optional uint32 tokenSequenceNumber = 2; */ int getTokenSequenceNumber(); /** * optional uint32 numKeys = 3; */ boolean hasNumKeys(); /** * optional uint32 numKeys = 3; */ int getNumKeys(); /** *
     * repeated DelegationKey keys
     * repeated PersistToken tokens
     * 
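     * (As with the string table, these records are not fields of this message:
     * numKeys DelegationKey messages and numTokens PersistToken messages are
     * expected to follow the section header as length-delimited records.)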
* * optional uint32 numTokens = 4; */ boolean hasNumTokens(); /** *
     * repeated DelegationKey keys
     * repeated PersistToken tokens
     * 
* * optional uint32 numTokens = 4; */ int getNumTokens(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection} */ public static final class SecretManagerSection extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SecretManagerSection) SecretManagerSectionOrBuilder { private static final long serialVersionUID = 0L; // Use SecretManagerSection.newBuilder() to construct. private SecretManagerSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SecretManagerSection() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SecretManagerSection( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; currentId_ = input.readUInt32(); break; } case 16: { bitField0_ |= 0x00000002; tokenSequenceNumber_ = input.readUInt32(); break; } case 24: { bitField0_ |= 0x00000004; numKeys_ = input.readUInt32(); break; } case 32: { bitField0_ |= 0x00000008; numTokens_ = input.readUInt32(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.Builder.class); } public interface DelegationKeyOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint32 id = 1; */ boolean hasId(); /** * optional uint32 id = 1; */ int getId(); /** * optional uint64 expiryDate = 2; */ boolean hasExpiryDate(); /** * optional uint64 expiryDate = 2; */ long getExpiryDate(); /** * optional bytes key = 3; */ boolean hasKey(); /** * optional bytes key = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getKey(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey} 
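   *
   * Construction sketch (illustrative only; the field values are made up):
   *
   *   SecretManagerSection.DelegationKey key =
   *       SecretManagerSection.DelegationKey.newBuilder()
   *           .setId(7)
   *           .setExpiryDate(1700000000000L)
   *           .setKey(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[] {1, 2, 3}))
   *           .build();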
*/ public static final class DelegationKey extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey) DelegationKeyOrBuilder { private static final long serialVersionUID = 0L; // Use DelegationKey.newBuilder() to construct. private DelegationKey(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private DelegationKey() { key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DelegationKey( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; id_ = input.readUInt32(); break; } case 16: { bitField0_ |= 0x00000002; expiryDate_ = input.readUInt64(); break; } case 26: { bitField0_ |= 0x00000004; key_ = input.readBytes(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.Builder.class); } private int bitField0_; public static final int ID_FIELD_NUMBER = 1; private int id_; /** * optional uint32 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 id = 1; */ public int getId() { return id_; } public static final int EXPIRYDATE_FIELD_NUMBER = 2; private long expiryDate_; /** * optional uint64 expiryDate = 2; */ public boolean hasExpiryDate() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 expiryDate = 2; */ public long getExpiryDate() { return expiryDate_; } public static final int KEY_FIELD_NUMBER = 3; private org.apache.hadoop.thirdparty.protobuf.ByteString key_; /** * optional bytes key = 3; */ public boolean hasKey() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bytes key = 3; */ public 
org.apache.hadoop.thirdparty.protobuf.ByteString getKey() { return key_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, id_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, expiryDate_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeBytes(3, key_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, id_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(2, expiryDate_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBytesSize(3, key_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey) obj; if (hasId() != other.hasId()) return false; if (hasId()) { if (getId() != other.getId()) return false; } if (hasExpiryDate() != other.hasExpiryDate()) return false; if (hasExpiryDate()) { if (getExpiryDate() != other.getExpiryDate()) return false; } if (hasKey() != other.hasKey()) return false; if (hasKey()) { if (!getKey() .equals(other.getKey())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasId()) { hash = (37 * hash) + ID_FIELD_NUMBER; hash = (53 * hash) + getId(); } if (hasExpiryDate()) { hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getExpiryDate()); } if (hasKey()) { hash = (37 * hash) + KEY_FIELD_NUMBER; hash = (53 * hash) + getKey().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom( 
org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == 
DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey) org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKeyOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); id_ = 0; bitField0_ = (bitField0_ & ~0x00000001); expiryDate_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.id_ = id_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) 
!= 0)) { result.expiryDate_ = expiryDate_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.key_ = key_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.getDefaultInstance()) return this; if (other.hasId()) { setId(other.getId()); } if (other.hasExpiryDate()) { setExpiryDate(other.getExpiryDate()); } if (other.hasKey()) { setKey(other.getKey()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int id_ ; /** * optional uint32 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 id = 1; */ public int getId() { return id_; } /** * optional uint32 id = 1; */ public Builder setId(int value) { bitField0_ |= 0x00000001; id_ = value; onChanged(); return this; } /** * optional uint32 id = 1; */ public Builder clearId() { bitField0_ = (bitField0_ & ~0x00000001); id_ = 0; onChanged(); return this; } private long expiryDate_ ; /** * optional uint64 expiryDate = 2; */ public boolean hasExpiryDate() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint64 expiryDate = 2; */ public 
long getExpiryDate() { return expiryDate_; } /** * optional uint64 expiryDate = 2; */ public Builder setExpiryDate(long value) { bitField0_ |= 0x00000002; expiryDate_ = value; onChanged(); return this; } /** * optional uint64 expiryDate = 2; */ public Builder clearExpiryDate() { bitField0_ = (bitField0_ & ~0x00000002); expiryDate_ = 0L; onChanged(); return this; } private org.apache.hadoop.thirdparty.protobuf.ByteString key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY; /** * optional bytes key = 3; */ public boolean hasKey() { return ((bitField0_ & 0x00000004) != 0); } /** * optional bytes key = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() { return key_; } /** * optional bytes key = 3; */ public Builder setKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; key_ = value; onChanged(); return this; } /** * optional bytes key = 3; */ public Builder clearKey() { bitField0_ = (bitField0_ & ~0x00000004); key_ = getDefaultInstance().getKey(); onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DelegationKey> PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DelegationKey>() { @java.lang.Override public DelegationKey parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new DelegationKey(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser<DelegationKey> parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser<DelegationKey> getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface PersistTokenOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SecretManagerSection.PersistToken) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint32 version = 1; */ boolean hasVersion(); /** * optional uint32 version = 1; */ int getVersion(); /** * optional string owner = 2; */ boolean hasOwner(); /** * optional string owner = 2; */ java.lang.String getOwner(); /** * optional string owner = 2; */ org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes(); /** * optional string renewer = 3; */ boolean hasRenewer(); /** * 
optional string renewer = 3; */ java.lang.String getRenewer(); /** * optional string renewer = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getRenewerBytes(); /** * optional string realUser = 4; */ boolean hasRealUser(); /** * optional string realUser = 4; */ java.lang.String getRealUser(); /** * optional string realUser = 4; */ org.apache.hadoop.thirdparty.protobuf.ByteString getRealUserBytes(); /** * optional uint64 issueDate = 5; */ boolean hasIssueDate(); /** * optional uint64 issueDate = 5; */ long getIssueDate(); /** * optional uint64 maxDate = 6; */ boolean hasMaxDate(); /** * optional uint64 maxDate = 6; */ long getMaxDate(); /** * optional uint32 sequenceNumber = 7; */ boolean hasSequenceNumber(); /** * optional uint32 sequenceNumber = 7; */ int getSequenceNumber(); /** * optional uint32 masterKeyId = 8; */ boolean hasMasterKeyId(); /** * optional uint32 masterKeyId = 8; */ int getMasterKeyId(); /** * optional uint64 expiryDate = 9; */ boolean hasExpiryDate(); /** * optional uint64 expiryDate = 9; */ long getExpiryDate(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection.PersistToken} */ public static final class PersistToken extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SecretManagerSection.PersistToken) PersistTokenOrBuilder { private static final long serialVersionUID = 0L; // Use PersistToken.newBuilder() to construct. private PersistToken(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private PersistToken() { owner_ = ""; renewer_ = ""; realUser_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private PersistToken( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; version_ = input.readUInt32(); break; } case 18: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; owner_ = bs; break; } case 26: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000004; renewer_ = bs; break; } case 34: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000008; realUser_ = bs; break; } case 40: { bitField0_ |= 0x00000010; issueDate_ = input.readUInt64(); break; } case 48: { bitField0_ |= 0x00000020; maxDate_ = input.readUInt64(); break; } case 56: { bitField0_ |= 0x00000040; sequenceNumber_ = input.readUInt32(); break; } case 64: { bitField0_ |= 0x00000080; masterKeyId_ = input.readUInt32(); break; } case 72: { bitField0_ |= 0x00000100; expiryDate_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch 
(java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.Builder.class); } private int bitField0_; public static final int VERSION_FIELD_NUMBER = 1; private int version_; /** * optional uint32 version = 1; */ public boolean hasVersion() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 version = 1; */ public int getVersion() { return version_; } public static final int OWNER_FIELD_NUMBER = 2; private volatile java.lang.Object owner_; /** * optional string owner = 2; */ public boolean hasOwner() { return ((bitField0_ & 0x00000002) != 0); } /** * optional string owner = 2; */ public java.lang.String getOwner() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { owner_ = s; } return s; } } /** * optional string owner = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int RENEWER_FIELD_NUMBER = 3; private volatile java.lang.Object renewer_; /** * optional string renewer = 3; */ public boolean hasRenewer() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string renewer = 3; */ public java.lang.String getRenewer() { java.lang.Object ref = renewer_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { renewer_ = s; } return s; } } /** * optional string renewer = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getRenewerBytes() { java.lang.Object ref = renewer_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); renewer_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int REALUSER_FIELD_NUMBER = 4; private volatile java.lang.Object realUser_; /** * optional string realUser = 4; */ public boolean hasRealUser() { return ((bitField0_ & 0x00000008) != 0); } /** * optional string realUser = 4; */ 
public java.lang.String getRealUser() { java.lang.Object ref = realUser_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { realUser_ = s; } return s; } } /** * optional string realUser = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getRealUserBytes() { java.lang.Object ref = realUser_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); realUser_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } public static final int ISSUEDATE_FIELD_NUMBER = 5; private long issueDate_; /** * optional uint64 issueDate = 5; */ public boolean hasIssueDate() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 issueDate = 5; */ public long getIssueDate() { return issueDate_; } public static final int MAXDATE_FIELD_NUMBER = 6; private long maxDate_; /** * optional uint64 maxDate = 6; */ public boolean hasMaxDate() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 maxDate = 6; */ public long getMaxDate() { return maxDate_; } public static final int SEQUENCENUMBER_FIELD_NUMBER = 7; private int sequenceNumber_; /** * optional uint32 sequenceNumber = 7; */ public boolean hasSequenceNumber() { return ((bitField0_ & 0x00000040) != 0); } /** * optional uint32 sequenceNumber = 7; */ public int getSequenceNumber() { return sequenceNumber_; } public static final int MASTERKEYID_FIELD_NUMBER = 8; private int masterKeyId_; /** * optional uint32 masterKeyId = 8; */ public boolean hasMasterKeyId() { return ((bitField0_ & 0x00000080) != 0); } /** * optional uint32 masterKeyId = 8; */ public int getMasterKeyId() { return masterKeyId_; } public static final int EXPIRYDATE_FIELD_NUMBER = 9; private long expiryDate_; /** * optional uint64 expiryDate = 9; */ public boolean hasExpiryDate() { return ((bitField0_ & 0x00000100) != 0); } /** * optional uint64 expiryDate = 9; */ public long getExpiryDate() { return expiryDate_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, version_); } if (((bitField0_ & 0x00000002) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, owner_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, renewer_); } if (((bitField0_ & 0x00000008) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 4, realUser_); } if (((bitField0_ & 0x00000010) != 0)) { output.writeUInt64(5, issueDate_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeUInt64(6, maxDate_); } if (((bitField0_ & 0x00000040) != 0)) { output.writeUInt32(7, sequenceNumber_); } if (((bitField0_ & 0x00000080) != 0)) { output.writeUInt32(8, masterKeyId_); } if (((bitField0_ & 0x00000100) != 0)) { output.writeUInt64(9, expiryDate_); } unknownFields.writeTo(output); } @java.lang.Override public 
int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, version_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, owner_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, renewer_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(4, realUser_); } if (((bitField0_ & 0x00000010) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(5, issueDate_); } if (((bitField0_ & 0x00000020) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(6, maxDate_); } if (((bitField0_ & 0x00000040) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(7, sequenceNumber_); } if (((bitField0_ & 0x00000080) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(8, masterKeyId_); } if (((bitField0_ & 0x00000100) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(9, expiryDate_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken) obj; if (hasVersion() != other.hasVersion()) return false; if (hasVersion()) { if (getVersion() != other.getVersion()) return false; } if (hasOwner() != other.hasOwner()) return false; if (hasOwner()) { if (!getOwner() .equals(other.getOwner())) return false; } if (hasRenewer() != other.hasRenewer()) return false; if (hasRenewer()) { if (!getRenewer() .equals(other.getRenewer())) return false; } if (hasRealUser() != other.hasRealUser()) return false; if (hasRealUser()) { if (!getRealUser() .equals(other.getRealUser())) return false; } if (hasIssueDate() != other.hasIssueDate()) return false; if (hasIssueDate()) { if (getIssueDate() != other.getIssueDate()) return false; } if (hasMaxDate() != other.hasMaxDate()) return false; if (hasMaxDate()) { if (getMaxDate() != other.getMaxDate()) return false; } if (hasSequenceNumber() != other.hasSequenceNumber()) return false; if (hasSequenceNumber()) { if (getSequenceNumber() != other.getSequenceNumber()) return false; } if (hasMasterKeyId() != other.hasMasterKeyId()) return false; if (hasMasterKeyId()) { if (getMasterKeyId() != other.getMasterKeyId()) return false; } if (hasExpiryDate() != other.hasExpiryDate()) return false; if (hasExpiryDate()) { if (getExpiryDate() != other.getExpiryDate()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasVersion()) { hash = (37 * hash) + VERSION_FIELD_NUMBER; hash = (53 * hash) + getVersion(); } if (hasOwner()) { hash = (37 * hash) + OWNER_FIELD_NUMBER; hash = (53 * hash) + 
getOwner().hashCode(); } if (hasRenewer()) { hash = (37 * hash) + RENEWER_FIELD_NUMBER; hash = (53 * hash) + getRenewer().hashCode(); } if (hasRealUser()) { hash = (37 * hash) + REALUSER_FIELD_NUMBER; hash = (53 * hash) + getRealUser().hashCode(); } if (hasIssueDate()) { hash = (37 * hash) + ISSUEDATE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getIssueDate()); } if (hasMaxDate()) { hash = (37 * hash) + MAXDATE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getMaxDate()); } if (hasSequenceNumber()) { hash = (37 * hash) + SEQUENCENUMBER_FIELD_NUMBER; hash = (53 * hash) + getSequenceNumber(); } if (hasMasterKeyId()) { hash = (37 * hash) + MASTERKEYID_FIELD_NUMBER; hash = (53 * hash) + getMasterKeyId(); } if (hasExpiryDate()) { hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getExpiryDate()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection.PersistToken} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SecretManagerSection.PersistToken) org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistTokenOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); version_ = 0; bitField0_ = (bitField0_ & ~0x00000001); owner_ = ""; bitField0_ = (bitField0_ & ~0x00000002); renewer_ = ""; bitField0_ = (bitField0_ & ~0x00000004); realUser_ = ""; bitField0_ = (bitField0_ & ~0x00000008); issueDate_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); maxDate_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); sequenceNumber_ = 0; bitField0_ = (bitField0_ & ~0x00000040); masterKeyId_ = 0; bitField0_ = (bitField0_ & ~0x00000080); expiryDate_ = 0L; bitField0_ = (bitField0_ & ~0x00000100); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken result = new 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.version_ = version_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { to_bitField0_ |= 0x00000002; } result.owner_ = owner_; if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; } result.renewer_ = renewer_; if (((from_bitField0_ & 0x00000008) != 0)) { to_bitField0_ |= 0x00000008; } result.realUser_ = realUser_; if (((from_bitField0_ & 0x00000010) != 0)) { result.issueDate_ = issueDate_; to_bitField0_ |= 0x00000010; } if (((from_bitField0_ & 0x00000020) != 0)) { result.maxDate_ = maxDate_; to_bitField0_ |= 0x00000020; } if (((from_bitField0_ & 0x00000040) != 0)) { result.sequenceNumber_ = sequenceNumber_; to_bitField0_ |= 0x00000040; } if (((from_bitField0_ & 0x00000080) != 0)) { result.masterKeyId_ = masterKeyId_; to_bitField0_ |= 0x00000080; } if (((from_bitField0_ & 0x00000100) != 0)) { result.expiryDate_ = expiryDate_; to_bitField0_ |= 0x00000100; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.getDefaultInstance()) return this; if (other.hasVersion()) { setVersion(other.getVersion()); } if (other.hasOwner()) { bitField0_ |= 0x00000002; owner_ = other.owner_; onChanged(); } if (other.hasRenewer()) { bitField0_ |= 0x00000004; renewer_ = other.renewer_; onChanged(); } if (other.hasRealUser()) { bitField0_ |= 0x00000008; realUser_ = other.realUser_; onChanged(); } if (other.hasIssueDate()) { setIssueDate(other.getIssueDate()); } if (other.hasMaxDate()) { setMaxDate(other.getMaxDate()); } if (other.hasSequenceNumber()) { setSequenceNumber(other.getSequenceNumber()); } if (other.hasMasterKeyId()) { setMasterKeyId(other.getMasterKeyId()); } if (other.hasExpiryDate()) { setExpiryDate(other.getExpiryDate()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } 
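      // Illustrative usage (editor's sketch, not part of the generated file):
      // populating a PersistToken through this Builder. Every value below is a
      // hypothetical example, not data taken from any real fsimage.
      //
      //   long now = System.currentTimeMillis();
      //   FsImageProto.SecretManagerSection.PersistToken token =
      //       FsImageProto.SecretManagerSection.PersistToken.newBuilder()
      //           .setVersion(0)
      //           .setOwner("alice")           // token owner
      //           .setRenewer("yarn")          // principal allowed to renew
      //           .setRealUser("")             // set only for proxy-user tokens
      //           .setIssueDate(now)
      //           .setMaxDate(now + 7L * 24 * 60 * 60 * 1000)  // renewable for 7 days
      //           .setSequenceNumber(42)
      //           .setMasterKeyId(7)           // id of the DelegationKey that signed it
      //           .setExpiryDate(now + 24L * 60 * 60 * 1000)   // current expiry
      //           .build();
      //
      // Unset optional fields keep their defaults and their hasXxx() accessors
      // return false; each setter flips the matching presence bit in bitField0_.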
@java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int version_ ; /** * optional uint32 version = 1; */ public boolean hasVersion() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 version = 1; */ public int getVersion() { return version_; } /** * optional uint32 version = 1; */ public Builder setVersion(int value) { bitField0_ |= 0x00000001; version_ = value; onChanged(); return this; } /** * optional uint32 version = 1; */ public Builder clearVersion() { bitField0_ = (bitField0_ & ~0x00000001); version_ = 0; onChanged(); return this; } private java.lang.Object owner_ = ""; /** * optional string owner = 2; */ public boolean hasOwner() { return ((bitField0_ & 0x00000002) != 0); } /** * optional string owner = 2; */ public java.lang.String getOwner() { java.lang.Object ref = owner_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { owner_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string owner = 2; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string owner = 2; */ public Builder setOwner( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; owner_ = value; onChanged(); return this; } /** * optional string owner = 2; */ public Builder clearOwner() { bitField0_ = (bitField0_ & ~0x00000002); owner_ = getDefaultInstance().getOwner(); onChanged(); return this; } /** * optional string owner = 2; */ public Builder setOwnerBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; owner_ = value; onChanged(); return this; } private java.lang.Object renewer_ = ""; /** * optional string renewer = 3; */ public boolean hasRenewer() { return ((bitField0_ & 0x00000004) != 0); } /** * optional string renewer = 3; */ public java.lang.String getRenewer() { java.lang.Object ref = renewer_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { renewer_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string renewer = 3; */ public 
org.apache.hadoop.thirdparty.protobuf.ByteString getRenewerBytes() { java.lang.Object ref = renewer_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); renewer_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string renewer = 3; */ public Builder setRenewer( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; renewer_ = value; onChanged(); return this; } /** * optional string renewer = 3; */ public Builder clearRenewer() { bitField0_ = (bitField0_ & ~0x00000004); renewer_ = getDefaultInstance().getRenewer(); onChanged(); return this; } /** * optional string renewer = 3; */ public Builder setRenewerBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; renewer_ = value; onChanged(); return this; } private java.lang.Object realUser_ = ""; /** * optional string realUser = 4; */ public boolean hasRealUser() { return ((bitField0_ & 0x00000008) != 0); } /** * optional string realUser = 4; */ public java.lang.String getRealUser() { java.lang.Object ref = realUser_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { realUser_ = s; } return s; } else { return (java.lang.String) ref; } } /** * optional string realUser = 4; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getRealUserBytes() { java.lang.Object ref = realUser_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); realUser_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * optional string realUser = 4; */ public Builder setRealUser( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; realUser_ = value; onChanged(); return this; } /** * optional string realUser = 4; */ public Builder clearRealUser() { bitField0_ = (bitField0_ & ~0x00000008); realUser_ = getDefaultInstance().getRealUser(); onChanged(); return this; } /** * optional string realUser = 4; */ public Builder setRealUserBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; realUser_ = value; onChanged(); return this; } private long issueDate_ ; /** * optional uint64 issueDate = 5; */ public boolean hasIssueDate() { return ((bitField0_ & 0x00000010) != 0); } /** * optional uint64 issueDate = 5; */ public long getIssueDate() { return issueDate_; } /** * optional uint64 issueDate = 5; */ public Builder setIssueDate(long value) { bitField0_ |= 0x00000010; issueDate_ = value; onChanged(); return this; } /** * optional uint64 issueDate = 5; */ public Builder clearIssueDate() { bitField0_ = (bitField0_ & ~0x00000010); issueDate_ = 0L; onChanged(); return this; } private long maxDate_ ; /** * optional uint64 maxDate = 6; */ public boolean hasMaxDate() { return ((bitField0_ & 0x00000020) != 0); } /** * optional uint64 maxDate = 6; */ public long getMaxDate() { return maxDate_; } /** * optional uint64 maxDate = 6; */ public Builder setMaxDate(long value) { bitField0_ |= 0x00000020; 
maxDate_ = value; onChanged(); return this; } /** * optional uint64 maxDate = 6; */ public Builder clearMaxDate() { bitField0_ = (bitField0_ & ~0x00000020); maxDate_ = 0L; onChanged(); return this; } private int sequenceNumber_ ; /** * optional uint32 sequenceNumber = 7; */ public boolean hasSequenceNumber() { return ((bitField0_ & 0x00000040) != 0); } /** * optional uint32 sequenceNumber = 7; */ public int getSequenceNumber() { return sequenceNumber_; } /** * optional uint32 sequenceNumber = 7; */ public Builder setSequenceNumber(int value) { bitField0_ |= 0x00000040; sequenceNumber_ = value; onChanged(); return this; } /** * optional uint32 sequenceNumber = 7; */ public Builder clearSequenceNumber() { bitField0_ = (bitField0_ & ~0x00000040); sequenceNumber_ = 0; onChanged(); return this; } private int masterKeyId_ ; /** * optional uint32 masterKeyId = 8; */ public boolean hasMasterKeyId() { return ((bitField0_ & 0x00000080) != 0); } /** * optional uint32 masterKeyId = 8; */ public int getMasterKeyId() { return masterKeyId_; } /** * optional uint32 masterKeyId = 8; */ public Builder setMasterKeyId(int value) { bitField0_ |= 0x00000080; masterKeyId_ = value; onChanged(); return this; } /** * optional uint32 masterKeyId = 8; */ public Builder clearMasterKeyId() { bitField0_ = (bitField0_ & ~0x00000080); masterKeyId_ = 0; onChanged(); return this; } private long expiryDate_ ; /** * optional uint64 expiryDate = 9; */ public boolean hasExpiryDate() { return ((bitField0_ & 0x00000100) != 0); } /** * optional uint64 expiryDate = 9; */ public long getExpiryDate() { return expiryDate_; } /** * optional uint64 expiryDate = 9; */ public Builder setExpiryDate(long value) { bitField0_ |= 0x00000100; expiryDate_ = value; onChanged(); return this; } /** * optional uint64 expiryDate = 9; */ public Builder clearExpiryDate() { bitField0_ = (bitField0_ & ~0x00000100); expiryDate_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SecretManagerSection.PersistToken) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SecretManagerSection.PersistToken) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<PersistToken> PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<PersistToken>() { @java.lang.Override public PersistToken parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new PersistToken(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser<PersistToken> parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser<PersistToken> 
getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private int bitField0_; public static final int CURRENTID_FIELD_NUMBER = 1; private int currentId_; /** * optional uint32 currentId = 1; */ public boolean hasCurrentId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 currentId = 1; */ public int getCurrentId() { return currentId_; } public static final int TOKENSEQUENCENUMBER_FIELD_NUMBER = 2; private int tokenSequenceNumber_; /** * optional uint32 tokenSequenceNumber = 2; */ public boolean hasTokenSequenceNumber() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint32 tokenSequenceNumber = 2; */ public int getTokenSequenceNumber() { return tokenSequenceNumber_; } public static final int NUMKEYS_FIELD_NUMBER = 3; private int numKeys_; /** * optional uint32 numKeys = 3; */ public boolean hasNumKeys() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint32 numKeys = 3; */ public int getNumKeys() { return numKeys_; } public static final int NUMTOKENS_FIELD_NUMBER = 4; private int numTokens_; /** *
      * <pre>
      * repeated DelegationKey keys
      * repeated PersistToken tokens
      * </pre>
* * optional uint32 numTokens = 4; */ public boolean hasNumTokens() { return ((bitField0_ & 0x00000008) != 0); } /** *
      * <pre>
      * repeated DelegationKey keys
      * repeated PersistToken tokens
      * </pre>
* * optional uint32 numTokens = 4; */ public int getNumTokens() { return numTokens_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt32(1, currentId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, tokenSequenceNumber_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt32(3, numKeys_); } if (((bitField0_ & 0x00000008) != 0)) { output.writeUInt32(4, numTokens_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(1, currentId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, tokenSequenceNumber_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(3, numKeys_); } if (((bitField0_ & 0x00000008) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(4, numTokens_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection) obj; if (hasCurrentId() != other.hasCurrentId()) return false; if (hasCurrentId()) { if (getCurrentId() != other.getCurrentId()) return false; } if (hasTokenSequenceNumber() != other.hasTokenSequenceNumber()) return false; if (hasTokenSequenceNumber()) { if (getTokenSequenceNumber() != other.getTokenSequenceNumber()) return false; } if (hasNumKeys() != other.hasNumKeys()) return false; if (hasNumKeys()) { if (getNumKeys() != other.getNumKeys()) return false; } if (hasNumTokens() != other.hasNumTokens()) return false; if (hasNumTokens()) { if (getNumTokens() != other.getNumTokens()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasCurrentId()) { hash = (37 * hash) + CURRENTID_FIELD_NUMBER; hash = (53 * hash) + getCurrentId(); } if (hasTokenSequenceNumber()) { hash = (37 * hash) + TOKENSEQUENCENUMBER_FIELD_NUMBER; hash = (53 * hash) + getTokenSequenceNumber(); } if (hasNumKeys()) { hash = (37 * hash) + NUMKEYS_FIELD_NUMBER; hash = (53 * hash) + getNumKeys(); } if (hasNumTokens()) { hash = (37 * hash) + NUMTOKENS_FIELD_NUMBER; hash = (53 * hash) + getNumTokens(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom( java.nio.ByteBuffer data) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } 
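    // Editor's sketch (not part of the generated file): how this section is
    // typically consumed. Per the numKeys/numTokens comments above, the section
    // header is followed in the fsimage stream by that many length-delimited
    // DelegationKey and PersistToken messages; the loop below is an assumption
    // based on those comments, with `in` being a hypothetical InputStream
    // already positioned at the section's offset from the FileSummary.
    //
    //   FsImageProto.SecretManagerSection section =
    //       FsImageProto.SecretManagerSection.parseDelimitedFrom(in);
    //   for (int i = 0; i < section.getNumKeys(); i++) {
    //     FsImageProto.SecretManagerSection.DelegationKey key =
    //         FsImageProto.SecretManagerSection.DelegationKey.parseDelimitedFrom(in);
    //     // ... register key.getId(), key.getExpiryDate(), key.getKey() ...
    //   }
    //   for (int i = 0; i < section.getNumTokens(); i++) {
    //     FsImageProto.SecretManagerSection.PersistToken token =
    //         FsImageProto.SecretManagerSection.PersistToken.parseDelimitedFrom(in);
    //     // ... rebuild the delegation token from its fields ...
    //   }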
@java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SecretManagerSection) org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSectionOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); currentId_ = 0; bitField0_ = (bitField0_ & ~0x00000001); tokenSequenceNumber_ = 0; bitField0_ = (bitField0_ & ~0x00000002); numKeys_ = 0; bitField0_ = (bitField0_ & ~0x00000004); numTokens_ = 0; bitField0_ = (bitField0_ & ~0x00000008); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection result = new 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.currentId_ = currentId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.tokenSequenceNumber_ = tokenSequenceNumber_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.numKeys_ = numKeys_; to_bitField0_ |= 0x00000004; } if (((from_bitField0_ & 0x00000008) != 0)) { result.numTokens_ = numTokens_; to_bitField0_ |= 0x00000008; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.getDefaultInstance()) return this; if (other.hasCurrentId()) { setCurrentId(other.getCurrentId()); } if (other.hasTokenSequenceNumber()) { setTokenSequenceNumber(other.getTokenSequenceNumber()); } if (other.hasNumKeys()) { setNumKeys(other.getNumKeys()); } if (other.hasNumTokens()) { setNumTokens(other.getNumTokens()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int currentId_ ; /** * optional uint32 currentId = 1; */ public boolean hasCurrentId() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint32 currentId = 1; */ public int 
getCurrentId() { return currentId_; } /** * optional uint32 currentId = 1; */ public Builder setCurrentId(int value) { bitField0_ |= 0x00000001; currentId_ = value; onChanged(); return this; } /** * optional uint32 currentId = 1; */ public Builder clearCurrentId() { bitField0_ = (bitField0_ & ~0x00000001); currentId_ = 0; onChanged(); return this; } private int tokenSequenceNumber_ ; /** * optional uint32 tokenSequenceNumber = 2; */ public boolean hasTokenSequenceNumber() { return ((bitField0_ & 0x00000002) != 0); } /** * optional uint32 tokenSequenceNumber = 2; */ public int getTokenSequenceNumber() { return tokenSequenceNumber_; } /** * optional uint32 tokenSequenceNumber = 2; */ public Builder setTokenSequenceNumber(int value) { bitField0_ |= 0x00000002; tokenSequenceNumber_ = value; onChanged(); return this; } /** * optional uint32 tokenSequenceNumber = 2; */ public Builder clearTokenSequenceNumber() { bitField0_ = (bitField0_ & ~0x00000002); tokenSequenceNumber_ = 0; onChanged(); return this; } private int numKeys_ ; /** * optional uint32 numKeys = 3; */ public boolean hasNumKeys() { return ((bitField0_ & 0x00000004) != 0); } /** * optional uint32 numKeys = 3; */ public int getNumKeys() { return numKeys_; } /** * optional uint32 numKeys = 3; */ public Builder setNumKeys(int value) { bitField0_ |= 0x00000004; numKeys_ = value; onChanged(); return this; } /** * optional uint32 numKeys = 3; */ public Builder clearNumKeys() { bitField0_ = (bitField0_ & ~0x00000004); numKeys_ = 0; onChanged(); return this; } private int numTokens_ ; /** *
       * <pre>
       * repeated DelegationKey keys
       * repeated PersistToken tokens
       * </pre>
       *
       * <code>optional uint32 numTokens = 4;</code>
       */
      public boolean hasNumTokens() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <pre>
       * repeated DelegationKey keys
       * repeated PersistToken tokens
       * </pre>
       *
       * <code>optional uint32 numTokens = 4;</code>
       */
      public int getNumTokens() {
        return numTokens_;
      }
      /**
       * <pre>
       * repeated DelegationKey keys
       * repeated PersistToken tokens
       * </pre>
       *
       * <code>optional uint32 numTokens = 4;</code>
       */
      public Builder setNumTokens(int value) {
        bitField0_ |= 0x00000008;
        numTokens_ = value;
        onChanged();
        return this;
      }
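      // Editor's note: the sketch below is illustrative and not part of the
      // protoc-generated file; the helper name is hypothetical. It shows how
      // this builder is typically used. The four scalar fields form a header
      // only: the DelegationKey and PersistToken records named in the <pre>
      // comments above are serialized as separate records after this section
      // in the fsimage stream, with numKeys/numTokens recording how many of
      // each to expect.
      private static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection
          exampleSecretManagerHeader() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.newBuilder()
            .setCurrentId(1)            // id of the current delegation key
            .setTokenSequenceNumber(0)  // last issued token sequence number
            .setNumKeys(0)              // DelegationKey records that follow
            .setNumTokens(0)            // PersistToken records that follow
            .build();                   // all fields optional, so this cannot throw
      }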
      /**
       * <pre>
       * repeated DelegationKey keys
       * repeated PersistToken tokens
       * </pre>
* * optional uint32 numTokens = 4; */ public Builder clearNumTokens() { bitField0_ = (bitField0_ & ~0x00000008); numTokens_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SecretManagerSection) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SecretManagerSection) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public SecretManagerSection parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new SecretManagerSection(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface CacheManagerSectionOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.CacheManagerSection) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint64 nextDirectiveId = 1; */ boolean hasNextDirectiveId(); /** * required uint64 nextDirectiveId = 1; */ long getNextDirectiveId(); /** * required uint32 numPools = 2; */ boolean hasNumPools(); /** * required uint32 numPools = 2; */ int getNumPools(); /** *
     * <pre>
     * repeated CachePoolInfoProto pools
     * repeated CacheDirectiveInfoProto directives
     * </pre>
     *
     * <code>required uint32 numDirectives = 3;</code>
     */
    boolean hasNumDirectives();
    /**
     * <pre>
     * repeated CachePoolInfoProto pools
     * repeated CacheDirectiveInfoProto directives
     * </pre>
* * required uint32 numDirectives = 3; */ int getNumDirectives(); } /** * Protobuf type {@code hadoop.hdfs.fsimage.CacheManagerSection} */ public static final class CacheManagerSection extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.CacheManagerSection) CacheManagerSectionOrBuilder { private static final long serialVersionUID = 0L; // Use CacheManagerSection.newBuilder() to construct. private CacheManagerSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private CacheManagerSection() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CacheManagerSection( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; nextDirectiveId_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; numPools_ = input.readUInt32(); break; } case 24: { bitField0_ |= 0x00000004; numDirectives_ = input.readUInt32(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.Builder.class); } private int bitField0_; public static final int NEXTDIRECTIVEID_FIELD_NUMBER = 1; private long nextDirectiveId_; /** * required uint64 nextDirectiveId = 1; */ public boolean hasNextDirectiveId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 nextDirectiveId = 1; */ public long getNextDirectiveId() { return nextDirectiveId_; } public static final int NUMPOOLS_FIELD_NUMBER = 2; private int numPools_; /** * required uint32 numPools = 2; */ public boolean hasNumPools() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 numPools = 2; */ public int getNumPools() { return numPools_; } public static final int NUMDIRECTIVES_FIELD_NUMBER = 3; private int numDirectives_; /** *
     * <pre>
     * repeated CachePoolInfoProto pools
     * repeated CacheDirectiveInfoProto directives
     * </pre>
     *
     * <code>required uint32 numDirectives = 3;</code>
     */
    public boolean hasNumDirectives() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <pre>
     * repeated CachePoolInfoProto pools
     * repeated CacheDirectiveInfoProto directives
     * </pre>
* * required uint32 numDirectives = 3; */ public int getNumDirectives() { return numDirectives_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasNextDirectiveId()) { memoizedIsInitialized = 0; return false; } if (!hasNumPools()) { memoizedIsInitialized = 0; return false; } if (!hasNumDirectives()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, nextDirectiveId_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, numPools_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt32(3, numDirectives_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, nextDirectiveId_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, numPools_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(3, numDirectives_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection) obj; if (hasNextDirectiveId() != other.hasNextDirectiveId()) return false; if (hasNextDirectiveId()) { if (getNextDirectiveId() != other.getNextDirectiveId()) return false; } if (hasNumPools() != other.hasNumPools()) return false; if (hasNumPools()) { if (getNumPools() != other.getNumPools()) return false; } if (hasNumDirectives() != other.hasNumDirectives()) return false; if (hasNumDirectives()) { if (getNumDirectives() != other.getNumDirectives()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasNextDirectiveId()) { hash = (37 * hash) + NEXTDIRECTIVEID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getNextDirectiveId()); } if (hasNumPools()) { hash = (37 * hash) + NUMPOOLS_FIELD_NUMBER; hash = (53 * hash) + getNumPools(); } if (hasNumDirectives()) { hash = (37 * hash) + NUMDIRECTIVES_FIELD_NUMBER; hash = (53 * hash) + getNumDirectives(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom( java.nio.ByteBuffer data, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder 
newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.CacheManagerSection} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.CacheManagerSection) org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSectionOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); nextDirectiveId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); numPools_ = 0; bitField0_ = (bitField0_ & ~0x00000002); numDirectives_ = 0; bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.nextDirectiveId_ = nextDirectiveId_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.numPools_ = 
numPools_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { result.numDirectives_ = numDirectives_; to_bitField0_ |= 0x00000004; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.getDefaultInstance()) return this; if (other.hasNextDirectiveId()) { setNextDirectiveId(other.getNextDirectiveId()); } if (other.hasNumPools()) { setNumPools(other.getNumPools()); } if (other.hasNumDirectives()) { setNumDirectives(other.getNumDirectives()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasNextDirectiveId()) { return false; } if (!hasNumPools()) { return false; } if (!hasNumDirectives()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long nextDirectiveId_ ; /** * required uint64 nextDirectiveId = 1; */ public boolean hasNextDirectiveId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 nextDirectiveId = 1; */ public long getNextDirectiveId() { return nextDirectiveId_; } /** * required uint64 nextDirectiveId = 1; */ public Builder setNextDirectiveId(long value) { bitField0_ |= 0x00000001; nextDirectiveId_ = value; onChanged(); return this; } /** * required uint64 nextDirectiveId = 1; */ public Builder clearNextDirectiveId() { bitField0_ = (bitField0_ & 
~0x00000001); nextDirectiveId_ = 0L; onChanged(); return this; } private int numPools_ ; /** * required uint32 numPools = 2; */ public boolean hasNumPools() { return ((bitField0_ & 0x00000002) != 0); } /** * required uint32 numPools = 2; */ public int getNumPools() { return numPools_; } /** * required uint32 numPools = 2; */ public Builder setNumPools(int value) { bitField0_ |= 0x00000002; numPools_ = value; onChanged(); return this; } /** * required uint32 numPools = 2; */ public Builder clearNumPools() { bitField0_ = (bitField0_ & ~0x00000002); numPools_ = 0; onChanged(); return this; } private int numDirectives_ ; /** *
       * <pre>
       * repeated CachePoolInfoProto pools
       * repeated CacheDirectiveInfoProto directives
       * </pre>
       *
       * <code>required uint32 numDirectives = 3;</code>
       */
      public boolean hasNumDirectives() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * repeated CachePoolInfoProto pools
       * repeated CacheDirectiveInfoProto directives
       * </pre>
       *
       * <code>required uint32 numDirectives = 3;</code>
       */
      public int getNumDirectives() {
        return numDirectives_;
      }
      /**
       * <pre>
       * repeated CachePoolInfoProto pools
       * repeated CacheDirectiveInfoProto directives
       * </pre>
       *
       * <code>required uint32 numDirectives = 3;</code>
       */
      public Builder setNumDirectives(int value) {
        bitField0_ |= 0x00000004;
        numDirectives_ = value;
        onChanged();
        return this;
      }
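      // Editor's note: illustrative sketch, not generated code; the helper
      // name is hypothetical. Unlike SecretManagerSection, all three fields
      // of CacheManagerSection are proto2 "required", so build() throws
      // UninitializedMessageException if any is unset (buildPartial() skips
      // that check, mirroring isInitialized() above).
      private static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection
          exampleCacheManagerHeader() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.newBuilder()
            .setNextDirectiveId(1L)  // required uint64
            .setNumPools(0)          // required uint32: CachePoolInfoProto records that follow
            .setNumDirectives(0)     // required uint32: CacheDirectiveInfoProto records that follow
            .build();
      }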
      /**
       * <pre>
       * repeated CachePoolInfoProto pools
       * repeated CacheDirectiveInfoProto directives
       * </pre>
* * required uint32 numDirectives = 3; */ public Builder clearNumDirectives() { bitField0_ = (bitField0_ & ~0x00000004); numDirectives_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.CacheManagerSection) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.CacheManagerSection) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public CacheManagerSection parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new CacheManagerSection(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ErasureCodingSectionOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.ErasureCodingSection) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ java.util.List getPoliciesList(); /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicies(int index); /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ int getPoliciesCount(); /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ java.util.List getPoliciesOrBuilderList(); /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPoliciesOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.fsimage.ErasureCodingSection} */ public static final class ErasureCodingSection extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.ErasureCodingSection) ErasureCodingSectionOrBuilder { private static final long serialVersionUID = 0L; // Use ErasureCodingSection.newBuilder() to construct. 
private ErasureCodingSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ErasureCodingSection() { policies_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ErasureCodingSection( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { policies_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } policies_.add( input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.PARSER, extensionRegistry)); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { policies_ = java.util.Collections.unmodifiableList(policies_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.Builder.class); } public static final int POLICIES_FIELD_NUMBER = 1; private java.util.List policies_; /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public java.util.List getPoliciesList() { return policies_; } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public java.util.List getPoliciesOrBuilderList() { return policies_; } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public int getPoliciesCount() { return policies_.size(); } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicies(int index) { return policies_.get(index); } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPoliciesOrBuilder( int index) { return policies_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { 
byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; for (int i = 0; i < getPoliciesCount(); i++) { if (!getPolicies(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < policies_.size(); i++) { output.writeMessage(1, policies_.get(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < policies_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, policies_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection)) { return super.equals(obj); } org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection) obj; if (!getPoliciesList() .equals(other.getPoliciesList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getPoliciesCount() > 0) { hash = (37 * hash) + POLICIES_FIELD_NUMBER; hash = (53 * hash) + getPoliciesList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } 
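  // Editor's note: illustrative helper, not emitted by protoc; the method
  // name is hypothetical. It exercises the parse/serialize surface defined
  // around this point: a section round-trips through bytes losslessly, and
  // fields from newer schema revisions survive as unknown fields.
  public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection roundTripExample(
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection section)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    // toByteArray() is inherited from AbstractMessageLite; parseFrom(byte[])
    // is declared just above.
    return parseFrom(section.toByteArray());
  }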
public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.fsimage.ErasureCodingSection} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.ErasureCodingSection) org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSectionOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.Builder.class); } // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getPoliciesFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (policiesBuilder_ == null) { policies_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { policiesBuilder_.clear(); } return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection getDefaultInstanceForType() { return org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection build() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection buildPartial() { org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection(this); int from_bitField0_ = bitField0_; if (policiesBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { policies_ = java.util.Collections.unmodifiableList(policies_); bitField0_ = (bitField0_ & ~0x00000001); } result.policies_ = policies_; } else { result.policies_ = policiesBuilder_.build(); } onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection) { return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection other) { if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.getDefaultInstance()) return this; if (policiesBuilder_ == null) { if (!other.policies_.isEmpty()) { if (policies_.isEmpty()) { policies_ = other.policies_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensurePoliciesIsMutable(); policies_.addAll(other.policies_); } onChanged(); } } else { if (!other.policies_.isEmpty()) { if (policiesBuilder_.isEmpty()) { policiesBuilder_.dispose(); policiesBuilder_ = null; policies_ = other.policies_; bitField0_ = (bitField0_ & ~0x00000001); policiesBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getPoliciesFieldBuilder() : null; } else { policiesBuilder_.addAllMessages(other.policies_); } } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { for (int i = 0; i < getPoliciesCount(); i++) { if (!getPolicies(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.util.List policies_ = java.util.Collections.emptyList(); private void ensurePoliciesIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { policies_ = new java.util.ArrayList(policies_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> policiesBuilder_; /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public java.util.List getPoliciesList() { if (policiesBuilder_ == null) { return java.util.Collections.unmodifiableList(policies_); } else { return policiesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public int getPoliciesCount() { if (policiesBuilder_ == null) { return policies_.size(); } else { return policiesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicies(int index) { if (policiesBuilder_ == null) { return policies_.get(index); } else { return policiesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public Builder setPolicies( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (policiesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePoliciesIsMutable(); policies_.set(index, value); onChanged(); } else { policiesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public Builder setPolicies( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) { if (policiesBuilder_ == null) { ensurePoliciesIsMutable(); policies_.set(index, builderForValue.build()); onChanged(); } else { policiesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public Builder addPolicies(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (policiesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePoliciesIsMutable(); policies_.add(value); 
onChanged(); } else { policiesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public Builder addPolicies( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) { if (policiesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePoliciesIsMutable(); policies_.add(index, value); onChanged(); } else { policiesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public Builder addPolicies( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) { if (policiesBuilder_ == null) { ensurePoliciesIsMutable(); policies_.add(builderForValue.build()); onChanged(); } else { policiesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public Builder addPolicies( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) { if (policiesBuilder_ == null) { ensurePoliciesIsMutable(); policies_.add(index, builderForValue.build()); onChanged(); } else { policiesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public Builder addAllPolicies( java.lang.Iterable values) { if (policiesBuilder_ == null) { ensurePoliciesIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, policies_); onChanged(); } else { policiesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public Builder clearPolicies() { if (policiesBuilder_ == null) { policies_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { policiesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public Builder removePolicies(int index) { if (policiesBuilder_ == null) { ensurePoliciesIsMutable(); policies_.remove(index); onChanged(); } else { policiesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getPoliciesBuilder( int index) { return getPoliciesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPoliciesOrBuilder( int index) { if (policiesBuilder_ == null) { return policies_.get(index); } else { return policiesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public java.util.List getPoliciesOrBuilderList() { if (policiesBuilder_ != null) { return policiesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(policies_); } } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder addPoliciesBuilder() { return getPoliciesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder 
addPoliciesBuilder( int index) { return getPoliciesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1; */ public java.util.List getPoliciesBuilderList() { return getPoliciesFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> getPoliciesFieldBuilder() { if (policiesBuilder_ == null) { policiesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>( policies_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); policies_ = null; } return policiesBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.ErasureCodingSection) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.ErasureCodingSection) private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection(); } public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ErasureCodingSection parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new ErasureCodingSection(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_fsimage_FileSummary_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable 
      internal_static_hadoop_hdfs_fsimage_FileSummary_Section_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_NameSystemSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_INode_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_StringTableSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SecretManagerSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_CacheManagerSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_descriptor;
  private static final
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_fieldAccessorTable;

  public static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      descriptor;
  static {
    java.lang.String[] descriptorData = {
      "\n\rfsimage.proto\022\023hadoop.hdfs.fsimage\032\nhd" +
      "fs.proto\032\tacl.proto\032\013xattr.proto\"\277\001\n\013Fil" +
      "eSummary\022\025\n\rondiskVersion\030\001 \002(\r\022\025\n\rlayou" +
      "tVersion\030\002 \002(\r\022\r\n\005codec\030\003 \001(\t\022:\n\010section" +
      "s\030\004 \003(\0132(.hadoop.hdfs.fsimage.FileSummar" +
      "y.Section\0327\n\007Section\022\014\n\004name\030\001 \001(\t\022\016\n\006le" +
      "ngth\030\002 \001(\004\022\016\n\006offset\030\003 \001(\004\"\344\001\n\021NameSyste" +
      "mSection\022\023\n\013namespaceId\030\001 \001(\r\022\022\n\ngenstam" +
      "pV1\030\002 \001(\004\022\022\n\ngenstampV2\030\003 \001(\004\022\027\n\017genstam" +
      "pV1Limit\030\004 \001(\004\022\034\n\024lastAllocatedBlockId\030\005" +
      " \001(\004\022\025\n\rtransactionId\030\006 \001(\004\022\037\n\027rollingUp" +
      "gradeStartTime\030\007 \001(\004\022#\n\033lastAllocatedStr" +
      "ipedBlockId\030\010 \001(\004\"\340\r\n\014INodeSection\022\023\n\013la" +
      "stInodeId\030\001 \001(\004\022\021\n\tnumInodes\030\002 \001(\004\032I\n\034Fi" +
      "leUnderConstructionFeature\022\022\n\nclientName" +
      "\030\001 \001(\t\022\025\n\rclientMachine\030\002 \001(\t\032&\n\017AclFeat" +
      "ureProto\022\023\n\007entries\030\002 \003(\007B\002\020\001\0320\n\021XAttrCo" +
      "mpactProto\022\014\n\004name\030\001 \002(\007\022\r\n\005value\030\002 \001(\014\032" +
      "X\n\021XAttrFeatureProto\022C\n\006xAttrs\030\001 \003(\01323.h" +
      "adoop.hdfs.fsimage.INodeSection.XAttrCom" +
      "pactProto\032\344\003\n\tINodeFile\022\023\n\013replication\030\001" +
      " \001(\r\022\030\n\020modificationTime\030\002 \001(\004\022\022\n\naccess" +
      "Time\030\003 \001(\004\022\032\n\022preferredBlockSize\030\004 \001(\004\022\022" +
      "\n\npermission\030\005 \001(\006\022\'\n\006blocks\030\006 \003(\0132\027.had" +
      "oop.hdfs.BlockProto\022N\n\006fileUC\030\007 \001(\0132>.ha" +
      "doop.hdfs.fsimage.INodeSection.FileUnder" +
      "ConstructionFeature\022>\n\003acl\030\010 \001(\01321.hadoo" +
      "p.hdfs.fsimage.INodeSection.AclFeaturePr" +
      "oto\022C\n\006xAttrs\030\t \001(\01323.hadoop.hdfs.fsimag" +
      "e.INodeSection.XAttrFeatureProto\022\027\n\017stor" +
      "agePolicyID\030\n \001(\r\022.\n\tblockType\030\013 \001(\0162\033.h" +
      "adoop.hdfs.BlockTypeProto\022\035\n\025erasureCodi" +
      "ngPolicyID\030\014 \001(\r\032a\n\034QuotaByStorageTypeEn" +
      "tryProto\0222\n\013storageType\030\001 \002(\0162\035.hadoop.h" +
      "dfs.StorageTypeProto\022\r\n\005quota\030\002 \002(\004\032p\n\036Q" +
      "uotaByStorageTypeFeatureProto\022N\n\006quotas\030" +
      "\001 \003(\0132>.hadoop.hdfs.fsimage.INodeSection" +
      ".QuotaByStorageTypeEntryProto\032\273\002\n\016INodeD" +
      "irectory\022\030\n\020modificationTime\030\001 \001(\004\022\017\n\007ns" +
      "Quota\030\002 \001(\004\022\017\n\007dsQuota\030\003 \001(\004\022\022\n\npermissi" +
      "on\030\004 \001(\006\022>\n\003acl\030\005 \001(\01321.hadoop.hdfs.fsim" +
      "age.INodeSection.AclFeatureProto\022C\n\006xAtt" +
      "rs\030\006 \001(\01323.hadoop.hdfs.fsimage.INodeSect" +
"ion.XAttrFeatureProto\022T\n\ntypeQuotas\030\007 \001(" + "\[email protected]" + "taByStorageTypeFeatureProto\032`\n\014INodeSyml" + "ink\022\022\n\npermission\030\001 \001(\006\022\016\n\006target\030\002 \001(\014\022" + "\030\n\020modificationTime\030\003 \001(\004\022\022\n\naccessTime\030" + "\004 \001(\004\032\314\002\n\005INode\022:\n\004type\030\001 \002(\0162,.hadoop.h" + "dfs.fsimage.INodeSection.INode.Type\022\n\n\002i" + "d\030\002 \002(\004\022\014\n\004name\030\003 \001(\014\0229\n\004file\030\004 \001(\0132+.ha" + "doop.hdfs.fsimage.INodeSection.INodeFile" + "\022C\n\tdirectory\030\005 \001(\01320.hadoop.hdfs.fsimag" + "e.INodeSection.INodeDirectory\022?\n\007symlink" + "\030\006 \001(\0132..hadoop.hdfs.fsimage.INodeSectio" + "n.INodeSymlink\",\n\004Type\022\010\n\004FILE\020\001\022\r\n\tDIRE" + "CTORY\020\002\022\013\n\007SYMLINK\020\003\"`\n\035FilesUnderConstr" + "uctionSection\032?\n\032FileUnderConstructionEn" + "try\022\017\n\007inodeId\030\001 \001(\004\022\020\n\010fullPath\030\002 \001(\t\"b" + "\n\025INodeDirectorySection\032I\n\010DirEntry\022\016\n\006p" + "arent\030\001 \001(\004\022\024\n\010children\030\002 \003(\004B\002\020\001\022\027\n\013ref" + "Children\030\003 \003(\rB\002\020\001\"z\n\025INodeReferenceSect" + "ion\032a\n\016INodeReference\022\022\n\nreferredId\030\001 \001(" + "\004\022\014\n\004name\030\002 \001(\014\022\025\n\rdstSnapshotId\030\003 \001(\r\022\026" + "\n\016lastSnapshotId\030\004 \001(\r\"\265\001\n\017SnapshotSecti" + "on\022\027\n\017snapshotCounter\030\001 \001(\r\022\034\n\020snapshott" + "ableDir\030\002 \003(\004B\002\020\001\022\024\n\014numSnapshots\030\003 \001(\r\032" + "U\n\010Snapshot\022\022\n\nsnapshotId\030\001 \001(\r\0225\n\004root\030" + "\002 \001(\0132\'.hadoop.hdfs.fsimage.INodeSection" + ".INode\"\200\005\n\023SnapshotDiffSection\032 \n\020Create" + "dListEntry\022\014\n\004name\030\001 \001(\014\032\367\001\n\rDirectoryDi" + "ff\022\022\n\nsnapshotId\030\001 \001(\r\022\024\n\014childrenSize\030\002" + " \001(\r\022\026\n\016isSnapshotRoot\030\003 \001(\010\022\014\n\004name\030\004 \001" + "(\014\022F\n\014snapshotCopy\030\005 \001(\01320.hadoop.hdfs.f" + "simage.INodeSection.INodeDirectory\022\027\n\017cr" + "eatedListSize\030\006 \001(\r\022\030\n\014deletedINode\030\007 \003(" + "\004B\002\020\001\022\033\n\017deletedINodeRef\030\010 \003(\rB\002\020\001\032\252\001\n\010F" + "ileDiff\022\022\n\nsnapshotId\030\001 \001(\r\022\020\n\010fileSize\030" + "\002 \001(\004\022\014\n\004name\030\003 \001(\014\022A\n\014snapshotCopy\030\004 \001(" + "\0132+.hadoop.hdfs.fsimage.INodeSection.INo" + "deFile\022\'\n\006blocks\030\005 \003(\0132\027.hadoop.hdfs.Blo" + "ckProto\032\237\001\n\tDiffEntry\022E\n\004type\030\001 \002(\01627.ha" + "doop.hdfs.fsimage.SnapshotDiffSection.Di" + "ffEntry.Type\022\017\n\007inodeId\030\002 \001(\004\022\021\n\tnumOfDi" + "ff\030\003 \001(\r\"\'\n\004Type\022\014\n\010FILEDIFF\020\001\022\021\n\rDIRECT" + "ORYDIFF\020\002\"]\n\022StringTableSection\022\020\n\010numEn" + "try\030\001 \001(\r\022\023\n\010maskBits\030\002 \001(\r:\0010\032 \n\005Entry\022" + "\n\n\002id\030\001 \001(\r\022\013\n\003str\030\002 \001(\t\"\341\002\n\024SecretManag" + "erSection\022\021\n\tcurrentId\030\001 \001(\r\022\033\n\023tokenSeq" + "uenceNumber\030\002 \001(\r\022\017\n\007numKeys\030\003 \001(\r\022\021\n\tnu" + "mTokens\030\004 \001(\r\032<\n\rDelegationKey\022\n\n\002id\030\001 \001" + "(\r\022\022\n\nexpiryDate\030\002 \001(\004\022\013\n\003key\030\003 \001(\014\032\266\001\n\014" + 
"PersistToken\022\017\n\007version\030\001 \001(\r\022\r\n\005owner\030\002" + " \001(\t\022\017\n\007renewer\030\003 \001(\t\022\020\n\010realUser\030\004 \001(\t\022" + "\021\n\tissueDate\030\005 \001(\004\022\017\n\007maxDate\030\006 \001(\004\022\026\n\016s" + "equenceNumber\030\007 \001(\r\022\023\n\013masterKeyId\030\010 \001(\r" + "\022\022\n\nexpiryDate\030\t \001(\004\"W\n\023CacheManagerSect" + "ion\022\027\n\017nextDirectiveId\030\001 \002(\004\022\020\n\010numPools" + "\030\002 \002(\r\022\025\n\rnumDirectives\030\003 \002(\r\"O\n\024Erasure" + "CodingSection\0227\n\010policies\030\001 \003(\0132%.hadoop" + ".hdfs.ErasureCodingPolicyProtoB6\n&org.ap" + "ache.hadoop.hdfs.server.namenodeB\014FsImag" + "eProto" }; org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { public org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry assignDescriptors( org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor root) { descriptor = root; return null; } }; org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(), org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor(), org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.getDescriptor(), }, assigner); internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_hadoop_hdfs_fsimage_FileSummary_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor, new java.lang.String[] { "OndiskVersion", "LayoutVersion", "Codec", "Sections", }); internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor = internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor.getNestedTypes().get(0); internal_static_hadoop_hdfs_fsimage_FileSummary_Section_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor, new java.lang.String[] { "Name", "Length", "Offset", }); internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_hadoop_hdfs_fsimage_NameSystemSection_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor, new java.lang.String[] { "NamespaceId", "GenstampV1", "GenstampV2", "GenstampV1Limit", "LastAllocatedBlockId", "TransactionId", "RollingUpgradeStartTime", "LastAllocatedStripedBlockId", }); internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_hadoop_hdfs_fsimage_INodeSection_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor, new java.lang.String[] { "LastInodeId", "NumInodes", }); internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor = internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(0); internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_fieldAccessorTable = new 
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor,
        new java.lang.String[] { "ClientName", "ClientMachine", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(1);
    internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor,
        new java.lang.String[] { "Entries", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(2);
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor,
        new java.lang.String[] { "Name", "Value", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(3);
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor,
        new java.lang.String[] { "XAttrs", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(4);
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor,
        new java.lang.String[] { "Replication", "ModificationTime", "AccessTime", "PreferredBlockSize", "Permission", "Blocks", "FileUC", "Acl", "XAttrs", "StoragePolicyID", "BlockType", "ErasureCodingPolicyID", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(5);
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor,
        new java.lang.String[] { "StorageType", "Quota", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(6);
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor,
        new java.lang.String[] { "Quotas", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(7);
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor,
        new java.lang.String[] { "ModificationTime", "NsQuota", "DsQuota", "Permission", "Acl", "XAttrs", "TypeQuotas", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(8);
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor,
        new java.lang.String[] { "Permission", "Target", "ModificationTime", "AccessTime", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(9);
    internal_static_hadoop_hdfs_fsimage_INodeSection_INode_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor,
        new java.lang.String[] { "Type", "Id", "Name", "File", "Directory", "Symlink", });
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor =
      getDescriptor().getMessageTypes().get(3);
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor =
      internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor,
        new java.lang.String[] { "InodeId", "FullPath", });
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor =
      getDescriptor().getMessageTypes().get(4);
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor,
        new java.lang.String[] { "Parent", "Children", "RefChildren", });
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor =
      getDescriptor().getMessageTypes().get(5);
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor,
        new java.lang.String[] { "ReferredId", "Name", "DstSnapshotId", "LastSnapshotId", });
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor =
      getDescriptor().getMessageTypes().get(6);
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor,
        new java.lang.String[] { "SnapshotCounter", "SnapshottableDir", "NumSnapshots", });
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor =
      internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor,
        new java.lang.String[] { "SnapshotId", "Root", });
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor =
      getDescriptor().getMessageTypes().get(7);
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor =
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor,
        new java.lang.String[] { "Name", });
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor =
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor.getNestedTypes().get(1);
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor,
        new java.lang.String[] { "SnapshotId", "ChildrenSize", "IsSnapshotRoot", "Name", "SnapshotCopy", "CreatedListSize", "DeletedINode", "DeletedINodeRef", });
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor =
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor.getNestedTypes().get(2);
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor,
        new java.lang.String[] { "SnapshotId", "FileSize", "Name", "SnapshotCopy", "Blocks", });
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor =
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor.getNestedTypes().get(3);
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor,
        new
      java.lang.String[] { "Type", "InodeId", "NumOfDiff", });
    internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor =
      getDescriptor().getMessageTypes().get(8);
    internal_static_hadoop_hdfs_fsimage_StringTableSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor,
        new java.lang.String[] { "NumEntry", "MaskBits", });
    internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor =
      internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor,
        new java.lang.String[] { "Id", "Str", });
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor =
      getDescriptor().getMessageTypes().get(9);
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor,
        new java.lang.String[] { "CurrentId", "TokenSequenceNumber", "NumKeys", "NumTokens", });
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor =
      internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor,
        new java.lang.String[] { "Id", "ExpiryDate", "Key", });
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor =
      internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor.getNestedTypes().get(1);
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor,
        new java.lang.String[] { "Version", "Owner", "Renewer", "RealUser", "IssueDate", "MaxDate", "SequenceNumber", "MasterKeyId", "ExpiryDate", });
    internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor =
      getDescriptor().getMessageTypes().get(10);
    internal_static_hadoop_hdfs_fsimage_CacheManagerSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor,
        new java.lang.String[] { "NextDirectiveId", "NumPools", "NumDirectives", });
    internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_descriptor =
      getDescriptor().getMessageTypes().get(11);
    internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_descriptor,
        new java.lang.String[] { "Policies", });
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor();
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor();
    org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.getDescriptor();
  }

  // @@protoc_insertion_point(outer_class_scope)
}
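
For orientation, here is a minimal, hypothetical usage sketch; it is not part of the generated file above. It round-trips a FileSummary through the builder, serializer, and parser that the protocol buffer compiler generates for the messages declared in fsimage.proto. The class name FileSummaryRoundTrip and all field values are illustrative assumptions; only the FsImageProto types and their accessors come from the generated code shown on this page.

// Hypothetical sketch (not generated code): build a FileSummary, serialize it,
// and parse it back using the standard protobuf-generated API.
import org.apache.hadoop.hdfs.server.namenode.FsImageProto;

public class FileSummaryRoundTrip {
  public static void main(String[] args) throws Exception {
    FsImageProto.FileSummary summary = FsImageProto.FileSummary.newBuilder()
        .setOndiskVersion(1)   // required uint32 ondiskVersion = 1
        .setLayoutVersion(2)   // required uint32 layoutVersion = 2
        .setCodec("")          // optional string codec = 3
        .addSections(FsImageProto.FileSummary.Section.newBuilder()
            .setName("INODE")  // illustrative section name
            .setLength(1024L)  // illustrative byte length
            .setOffset(8L))    // illustrative byte offset
        .build();

    // Serialize to the wire format, then parse it back.
    byte[] wire = summary.toByteArray();
    FsImageProto.FileSummary parsed = FsImageProto.FileSummary.parseFrom(wire);
    System.out.println(parsed.getSectionsCount());  // prints 1
  }
}

Note that in a real fsimage file the FileSummary trailer is located via a length recorded at the end of the file rather than parsed from offset zero; the sketch above only exercises the generated message API, not that framing.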