// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: NamenodeProtocol.proto

package org.apache.hadoop.hdfs.protocol.proto;

public final class NamenodeProtocolProtos {
  private NamenodeProtocolProtos() {}
  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
  }

  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
    registerAllExtensions(
        (org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
  }
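  // Editor's note: a usage sketch, not part of the generated code.
  // NamenodeProtocol.proto declares no extensions, so both overloads above are
  // no-ops; they exist so callers can uniformly pre-register extensions before
  // parsing, e.g.:
  //
  //   org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry =
  //       org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry.newInstance();
  //   NamenodeProtocolProtos.registerAllExtensions(registry);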
  public interface GetBlocksRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetBlocksRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * Datanode ID
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
     */
    boolean hasDatanode();

    /**
     * <pre>
     * Datanode ID
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
     */
    HdfsProtos.DatanodeIDProto getDatanode();

    /**
     * <pre>
     * Datanode ID
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
     */
    HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeOrBuilder();

    /**
     * <pre>
     * Size in bytes
     * </pre>
     *
     * <code>required uint64 size = 2;</code>
     */
    boolean hasSize();

    /**
     * <pre>
     * Size in bytes
     * </pre>
     *
     * <code>required uint64 size = 2;</code>
     */
    long getSize();

    /**
     * <pre>
     * Minimum Block Size in bytes, adding default value to 10MB, as this might
     * cause problem during rolling upgrade, when balancers are upgraded later.
     * For more info refer HDFS-13356
     * </pre>
     *
     * <code>optional uint64 minBlockSize = 3 [default = 10485760];</code>
     */
    boolean hasMinBlockSize();

    /**
     * <pre>
     * Minimum Block Size in bytes, adding default value to 10MB, as this might
     * cause problem during rolling upgrade, when balancers are upgraded later.
     * For more info refer HDFS-13356
     * </pre>
     *
     * <code>optional uint64 minBlockSize = 3 [default = 10485760];</code>
     */
    long getMinBlockSize();
  }
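  // Editor's note: a usage sketch, not part of the generated code. Both a
  // GetBlocksRequestProto and its Builder implement the interface above, so
  // read-only code can accept either; the optional field falls back to its
  // declared default:
  //
  //   static void describe(GetBlocksRequestProtoOrBuilder req) {
  //     if (req.hasDatanode() && req.hasSize()) {
  //       System.out.println("size=" + req.getSize()
  //           + " minBlockSize=" + req.getMinBlockSize()); // 10485760 unless set
  //     }
  //   }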
  /**
   * <pre>
   **
   * Get list of blocks for a given datanode with the total length 
   * of adding up to given size
   * datanode - Datanode ID to get list of block from
   * size - size to which the block lengths must add up to
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.namenode.GetBlocksRequestProto}
   */
  public static final class GetBlocksRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetBlocksRequestProto)
      GetBlocksRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use GetBlocksRequestProto.newBuilder() to construct.
    private GetBlocksRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private GetBlocksRequestProto() {
      minBlockSize_ = 10485760L;
    }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() {
      return this.unknownFields;
    }

    private GetBlocksRequestProto(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      this();
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      int mutable_bitField0_ = 0;
      org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
          org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 10: {
              HdfsProtos.DatanodeIDProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) != 0)) {
                subBuilder = datanode_.toBuilder();
              }
              datanode_ = input.readMessage(HdfsProtos.DatanodeIDProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(datanode_);
                datanode_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              size_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              minBlockSize_ = input.readUInt64();
              break;
            }
            default: {
              if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() {
      return internal_static_hadoop_hdfs_namenode_GetBlocksRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() {
      return internal_static_hadoop_hdfs_namenode_GetBlocksRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(GetBlocksRequestProto.class, GetBlocksRequestProto.Builder.class);
    }

    private int bitField0_;
    public static final int DATANODE_FIELD_NUMBER = 1;
    private HdfsProtos.DatanodeIDProto datanode_;
    /**
     * <pre>
     * Datanode ID
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
     */
    public boolean hasDatanode() { return ((bitField0_ & 0x00000001) != 0); }
    /**
     * <pre>
     * Datanode ID
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
     */
    public HdfsProtos.DatanodeIDProto getDatanode() {
      return datanode_ == null ? HdfsProtos.DatanodeIDProto.getDefaultInstance() : datanode_;
    }
    /**
     * <pre>
     * Datanode ID
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
     */
    public HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeOrBuilder() {
      return datanode_ == null ? HdfsProtos.DatanodeIDProto.getDefaultInstance() : datanode_;
    }

    public static final int SIZE_FIELD_NUMBER = 2;
    private long size_;
    /**
     * <pre>
     * Size in bytes
     * </pre>
     *
     * <code>required uint64 size = 2;</code>
     */
    public boolean hasSize() { return ((bitField0_ & 0x00000002) != 0); }
    /**
     * <pre>
     * Size in bytes
     * </pre>
     *
     * <code>required uint64 size = 2;</code>
     */
    public long getSize() { return size_; }

    public static final int MINBLOCKSIZE_FIELD_NUMBER = 3;
    private long minBlockSize_;
    /**
     * <pre>
     * Minimum Block Size in bytes, adding default value to 10MB, as this might
     * cause problem during rolling upgrade, when balancers are upgraded later.
     * For more info refer HDFS-13356
     * </pre>
     *
     * <code>optional uint64 minBlockSize = 3 [default = 10485760];</code>
     */
    public boolean hasMinBlockSize() { return ((bitField0_ & 0x00000004) != 0); }
    /**
     * <pre>
     * Minimum Block Size in bytes, adding default value to 10MB, as this might
     * cause problem during rolling upgrade, when balancers are upgraded later.
     * For more info refer HDFS-13356
     * </pre>
     *
     * <code>optional uint64 minBlockSize = 3 [default = 10485760];</code>
     */
    public long getMinBlockSize() { return minBlockSize_; }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;
      if (!hasDatanode()) { memoizedIsInitialized = 0; return false; }
      if (!hasSize()) { memoizedIsInitialized = 0; return false; }
      if (!getDatanode().isInitialized()) { memoizedIsInitialized = 0; return false; }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getDatanode()); }
      if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt64(2, size_); }
      if (((bitField0_ & 0x00000004) != 0)) { output.writeUInt64(3, minBlockSize_); }
      unknownFields.writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;
      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream.computeMessageSize(1, getDatanode());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream.computeUInt64Size(2, size_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream.computeUInt64Size(3, minBlockSize_);
      }
      size += unknownFields.getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) return true;
      if (!(obj instanceof GetBlocksRequestProto)) return super.equals(obj);
      GetBlocksRequestProto other = (GetBlocksRequestProto) obj;
      if (hasDatanode() != other.hasDatanode()) return false;
      if (hasDatanode() && !getDatanode().equals(other.getDatanode())) return false;
      if (hasSize() != other.hasSize()) return false;
      if (hasSize() && getSize() != other.getSize()) return false;
      if (hasMinBlockSize() != other.hasMinBlockSize()) return false;
      if (hasMinBlockSize() && getMinBlockSize() != other.getMinBlockSize()) return false;
      if (!unknownFields.equals(other.unknownFields)) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) return memoizedHashCode;
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasDatanode()) {
        hash = (37 * hash) + DATANODE_FIELD_NUMBER;
        hash = (53 * hash) + getDatanode().hashCode();
      }
      if (hasSize()) {
        hash = (37 * hash) + SIZE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(getSize());
      }
      if (hasMinBlockSize()) {
        hash = (37 * hash) + MINBLOCKSIZE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(getMinBlockSize());
      }
      hash = (29 * hash) + unknownFields.hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static GetBlocksRequestProto parseFrom(java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static GetBlocksRequestProto parseFrom(java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static GetBlocksRequestProto parseFrom(org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static GetBlocksRequestProto parseFrom(org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static GetBlocksRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static GetBlocksRequestProto parseFrom(byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static GetBlocksRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
    }
    public static GetBlocksRequestProto parseFrom(java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static GetBlocksRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
    }
    public static GetBlocksRequestProto parseDelimitedFrom(java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static GetBlocksRequestProto parseFrom(org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
    }
    public static GetBlocksRequestProto parseFrom(org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
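    // Editor's note: a usage sketch, not part of the generated code. The
    // parseFrom overloads above pair with toByteArray()/writeTo(...) inherited
    // from GeneratedMessageV3 for a wire round trip:
    //
    //   byte[] wire = request.toByteArray();
    //   GetBlocksRequestProto copy = GetBlocksRequestProto.parseFrom(wire);
    //   assert copy.equals(request);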
    public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); }
    public static Builder newBuilder(GetBlocksRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
    }
    @java.lang.Override
    protected Builder newBuilderForType(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      return new Builder(parent);
    }
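    // Editor's note: a usage sketch, not part of the generated code. Messages
    // are immutable; toBuilder() copies an existing request so individual
    // fields can be overridden:
    //
    //   GetBlocksRequestProto bigger = request.toBuilder()
    //       .setSize(request.getSize() * 2)
    //       .build();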
    /**
     * <pre>
     **
     * Get list of blocks for a given datanode with the total length 
     * of adding up to given size
     * datanode - Datanode ID to get list of block from
     * size - size to which the block lengths must add up to
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.namenode.GetBlocksRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetBlocksRequestProto)
        GetBlocksRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() {
        return internal_static_hadoop_hdfs_namenode_GetBlocksRequestProto_descriptor;
      }
      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() {
        return internal_static_hadoop_hdfs_namenode_GetBlocksRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(GetBlocksRequestProto.class, GetBlocksRequestProto.Builder.class);
      }
      // Construct using GetBlocksRequestProto.newBuilder()
      private Builder() { maybeForceBuilderInitialization(); }
      private Builder(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {
          getDatanodeFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        if (datanodeBuilder_ == null) { datanode_ = null; } else { datanodeBuilder_.clear(); }
        bitField0_ = (bitField0_ & ~0x00000001);
        size_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        minBlockSize_ = 10485760L;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() {
        return internal_static_hadoop_hdfs_namenode_GetBlocksRequestProto_descriptor;
      }
      @java.lang.Override
      public GetBlocksRequestProto getDefaultInstanceForType() {
        return GetBlocksRequestProto.getDefaultInstance();
      }
      @java.lang.Override
      public GetBlocksRequestProto build() {
        GetBlocksRequestProto result = buildPartial();
        if (!result.isInitialized()) { throw newUninitializedMessageException(result); }
        return result;
      }
      @java.lang.Override
      public GetBlocksRequestProto buildPartial() {
        GetBlocksRequestProto result = new GetBlocksRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          if (datanodeBuilder_ == null) { result.datanode_ = datanode_; }
          else { result.datanode_ = datanodeBuilder_.build(); }
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.size_ = size_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) { to_bitField0_ |= 0x00000004; }
        result.minBlockSize_ = minBlockSize_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      @java.lang.Override
      public Builder clone() { return super.clone(); }
      @java.lang.Override
      public Builder setField(org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof GetBlocksRequestProto) {
          return mergeFrom((GetBlocksRequestProto) other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      public Builder mergeFrom(GetBlocksRequestProto other) {
        if (other == GetBlocksRequestProto.getDefaultInstance()) return this;
        if (other.hasDatanode()) { mergeDatanode(other.getDatanode()); }
        if (other.hasSize()) { setSize(other.getSize()); }
        if (other.hasMinBlockSize()) { setMinBlockSize(other.getMinBlockSize()); }
        this.mergeUnknownFields(other.unknownFields);
        onChanged();
        return this;
      }
      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasDatanode()) return false;
        if (!hasSize()) return false;
        if (!getDatanode().isInitialized()) return false;
        return true;
      }
      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        GetBlocksRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (GetBlocksRequestProto) e.getUnfinishedMessage();
          throw e.unwrapIOException();
        } finally {
          if (parsedMessage != null) { mergeFrom(parsedMessage); }
        }
        return this;
      }

      private int bitField0_;
      private HdfsProtos.DatanodeIDProto datanode_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          HdfsProtos.DatanodeIDProto, HdfsProtos.DatanodeIDProto.Builder,
          HdfsProtos.DatanodeIDProtoOrBuilder> datanodeBuilder_;
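      // Editor's note: a usage sketch, not part of the generated code.
      // datanode and size are proto2 required fields, so build() throws on an
      // incomplete builder; isInitialized()/buildPartial() let callers probe
      // first:
      //
      //   Builder b = GetBlocksRequestProto.newBuilder().setSize(1024L);
      //   if (!b.isInitialized()) {
      //     GetBlocksRequestProto partial = b.buildPartial(); // datanode unset
      //   }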
      /**
       * <pre>
       * Datanode ID
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
       */
      public boolean hasDatanode() { return ((bitField0_ & 0x00000001) != 0); }
      /**
       * <pre>
       * Datanode ID
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
       */
      public HdfsProtos.DatanodeIDProto getDatanode() {
        if (datanodeBuilder_ == null) {
          return datanode_ == null ? HdfsProtos.DatanodeIDProto.getDefaultInstance() : datanode_;
        } else {
          return datanodeBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * Datanode ID
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
       */
      public Builder setDatanode(HdfsProtos.DatanodeIDProto value) {
        if (datanodeBuilder_ == null) {
          if (value == null) { throw new NullPointerException(); }
          datanode_ = value;
          onChanged();
        } else {
          datanodeBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <pre>
       * Datanode ID
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
       */
      public Builder setDatanode(HdfsProtos.DatanodeIDProto.Builder builderForValue) {
        if (datanodeBuilder_ == null) {
          datanode_ = builderForValue.build();
          onChanged();
        } else {
          datanodeBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <pre>
       * Datanode ID
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
       */
      public Builder mergeDatanode(HdfsProtos.DatanodeIDProto value) {
        if (datanodeBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) && datanode_ != null &&
              datanode_ != HdfsProtos.DatanodeIDProto.getDefaultInstance()) {
            datanode_ = HdfsProtos.DatanodeIDProto.newBuilder(datanode_).mergeFrom(value).buildPartial();
          } else {
            datanode_ = value;
          }
          onChanged();
        } else {
          datanodeBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <pre>
       * Datanode ID
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
       */
      public Builder clearDatanode() {
        if (datanodeBuilder_ == null) {
          datanode_ = null;
          onChanged();
        } else {
          datanodeBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <pre>
       * Datanode ID
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
       */
      public HdfsProtos.DatanodeIDProto.Builder getDatanodeBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getDatanodeFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * Datanode ID
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
       */
      public HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeOrBuilder() {
        if (datanodeBuilder_ != null) {
          return datanodeBuilder_.getMessageOrBuilder();
        } else {
          return datanode_ == null ? HdfsProtos.DatanodeIDProto.getDefaultInstance() : datanode_;
        }
      }
      /**
       * <pre>
       * Datanode ID
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanode = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          HdfsProtos.DatanodeIDProto, HdfsProtos.DatanodeIDProto.Builder,
          HdfsProtos.DatanodeIDProtoOrBuilder> getDatanodeFieldBuilder() {
        if (datanodeBuilder_ == null) {
          datanodeBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              HdfsProtos.DatanodeIDProto, HdfsProtos.DatanodeIDProto.Builder,
              HdfsProtos.DatanodeIDProtoOrBuilder>(getDatanode(), getParentForChildren(), isClean());
          datanode_ = null;
        }
        return datanodeBuilder_;
      }

      private long size_;
      /**
       * <pre>
       * Size in bytes
       * </pre>
       *
       * <code>required uint64 size = 2;</code>
       */
      public boolean hasSize() { return ((bitField0_ & 0x00000002) != 0); }
      /**
       * <pre>
       * Size in bytes
       * </pre>
       *
       * <code>required uint64 size = 2;</code>
       */
      public long getSize() { return size_; }
      /**
       * <pre>
       * Size in bytes
       * </pre>
       *
       * <code>required uint64 size = 2;</code>
       */
      public Builder setSize(long value) {
        bitField0_ |= 0x00000002;
        size_ = value;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Size in bytes
       * </pre>
       *
       * <code>required uint64 size = 2;</code>
       */
      public Builder clearSize() {
        bitField0_ = (bitField0_ & ~0x00000002);
        size_ = 0L;
        onChanged();
        return this;
      }

      private long minBlockSize_ = 10485760L;
      /**
       * <pre>
       * Minimum Block Size in bytes, adding default value to 10MB, as this might
       * cause problem during rolling upgrade, when balancers are upgraded later.
       * For more info refer HDFS-13356
       * </pre>
       *
       * <code>optional uint64 minBlockSize = 3 [default = 10485760];</code>
       */
      public boolean hasMinBlockSize() { return ((bitField0_ & 0x00000004) != 0); }
      /**
       * <pre>
       * Minimum Block Size in bytes, adding default value to 10MB, as this might
       * cause problem during rolling upgrade, when balancers are upgraded later.
       * For more info refer HDFS-13356
       * </pre>
       *
       * <code>optional uint64 minBlockSize = 3 [default = 10485760];</code>
       */
      public long getMinBlockSize() { return minBlockSize_; }
      /**
       * <pre>
       * Minimum Block Size in bytes, adding default value to 10MB, as this might
       * cause problem during rolling upgrade, when balancers are upgraded later.
       * For more info refer HDFS-13356
       * </pre>
       *
       * <code>optional uint64 minBlockSize = 3 [default = 10485760];</code>
       */
      public Builder setMinBlockSize(long value) {
        bitField0_ |= 0x00000004;
        minBlockSize_ = value;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Minimum Block Size in bytes, adding default value to 10MB, as this might
       * cause problem during rolling upgrade, when balancers are upgraded later.
       * For more info refer HDFS-13356
       * </pre>
       *
       * <code>optional uint64 minBlockSize = 3 [default = 10485760];</code>
       */
      public Builder clearMinBlockSize() {
        bitField0_ = (bitField0_ & ~0x00000004);
        minBlockSize_ = 10485760L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }
      @java.lang.Override
      public final Builder mergeUnknownFields(final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetBlocksRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetBlocksRequestProto)
    private static final GetBlocksRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new GetBlocksRequestProto();
    }
    public static GetBlocksRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; }

    @java.lang.Deprecated
    public static final org.apache.hadoop.thirdparty.protobuf.Parser<GetBlocksRequestProto> PARSER =
        new org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetBlocksRequestProto>() {
      @java.lang.Override
      public GetBlocksRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return new GetBlocksRequestProto(input, extensionRegistry);
      }
    };
    public static org.apache.hadoop.thirdparty.protobuf.Parser<GetBlocksRequestProto> parser() { return PARSER; }
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<GetBlocksRequestProto> getParserForType() { return PARSER; }
    @java.lang.Override
    public GetBlocksRequestProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; }
  }
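  // Editor's note: a usage sketch, not part of the generated code. Putting the
  // pieces together; `dn` stands in for a HdfsProtos.DatanodeIDProto obtained
  // elsewhere:
  //
  //   GetBlocksRequestProto request = GetBlocksRequestProto.newBuilder()
  //       .setDatanode(dn)            // required
  //       .setSize(10L * 1024 * 1024) // required: total block length wanted
  //       .setMinBlockSize(10485760L) // optional; 10 MB is already the default
  //       .build();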
  public interface GetBlocksResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetBlocksResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * List of blocks
     * </pre>
     *
     * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
     */
    boolean hasBlocks();

    /**
     * <pre>
     * List of blocks
     * </pre>
     *
     * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
     */
    HdfsServerProtos.BlocksWithLocationsProto getBlocks();

    /**
     * <pre>
     * List of blocks
     * </pre>
     *
     * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
     */
    HdfsServerProtos.BlocksWithLocationsProtoOrBuilder getBlocksOrBuilder();
  }
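  // Editor's note: a usage sketch, not part of the generated code. A response
  // carries one required BlocksWithLocationsProto; checking presence first is
  // cheap and guards against partially built messages:
  //
  //   if (response.hasBlocks()) {
  //     HdfsServerProtos.BlocksWithLocationsProto blocks = response.getBlocks();
  //   }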
  /**
   * <pre>
   **
   * blocks - List of returned blocks
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.namenode.GetBlocksResponseProto}
   */
  public static final class GetBlocksResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetBlocksResponseProto)
      GetBlocksResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use GetBlocksResponseProto.newBuilder() to construct.
    private GetBlocksResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private GetBlocksResponseProto() { }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() {
      return this.unknownFields;
    }

    private GetBlocksResponseProto(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      this();
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      int mutable_bitField0_ = 0;
      org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
          org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 10: {
              HdfsServerProtos.BlocksWithLocationsProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) != 0)) {
                subBuilder = blocks_.toBuilder();
              }
              blocks_ = input.readMessage(HdfsServerProtos.BlocksWithLocationsProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(blocks_);
                blocks_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            default: {
              if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() {
      return internal_static_hadoop_hdfs_namenode_GetBlocksResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() {
      return internal_static_hadoop_hdfs_namenode_GetBlocksResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(GetBlocksResponseProto.class, GetBlocksResponseProto.Builder.class);
    }

    private int bitField0_;
    public static final int BLOCKS_FIELD_NUMBER = 1;
    private HdfsServerProtos.BlocksWithLocationsProto blocks_;
    /**
     * <pre>
     * List of blocks
     * </pre>
     *
     * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
     */
    public boolean hasBlocks() { return ((bitField0_ & 0x00000001) != 0); }
    /**
     * <pre>
     * List of blocks
     * </pre>
     *
     * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
     */
    public HdfsServerProtos.BlocksWithLocationsProto getBlocks() {
      return blocks_ == null ? HdfsServerProtos.BlocksWithLocationsProto.getDefaultInstance() : blocks_;
    }
    /**
     * <pre>
     * List of blocks
     * </pre>
     *
     * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
     */
    public HdfsServerProtos.BlocksWithLocationsProtoOrBuilder getBlocksOrBuilder() {
      return blocks_ == null ? HdfsServerProtos.BlocksWithLocationsProto.getDefaultInstance() : blocks_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;
      if (!hasBlocks()) { memoizedIsInitialized = 0; return false; }
      if (!getBlocks().isInitialized()) { memoizedIsInitialized = 0; return false; }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getBlocks()); }
      unknownFields.writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;
      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream.computeMessageSize(1, getBlocks());
      }
      size += unknownFields.getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) return true;
      if (!(obj instanceof GetBlocksResponseProto)) return super.equals(obj);
      GetBlocksResponseProto other = (GetBlocksResponseProto) obj;
      if (hasBlocks() != other.hasBlocks()) return false;
      if (hasBlocks() && !getBlocks().equals(other.getBlocks())) return false;
      if (!unknownFields.equals(other.unknownFields)) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) return memoizedHashCode;
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBlocks()) {
        hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
        hash = (53 * hash) + getBlocks().hashCode();
      }
      hash = (29 * hash) + unknownFields.hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static GetBlocksResponseProto parseFrom(java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static GetBlocksResponseProto parseFrom(java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static GetBlocksResponseProto parseFrom(org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static GetBlocksResponseProto parseFrom(org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static GetBlocksResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static GetBlocksResponseProto parseFrom(byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static GetBlocksResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
    }
    public static GetBlocksResponseProto parseFrom(java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static GetBlocksResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
    }
    public static GetBlocksResponseProto parseDelimitedFrom(java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static GetBlocksResponseProto parseFrom(org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
    }
    public static GetBlocksResponseProto parseFrom(org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); }
    public static Builder newBuilder(GetBlocksResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
    }
    @java.lang.Override
    protected Builder newBuilderForType(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      return new Builder(parent);
    }
    /**
     * <pre>
     **
     * blocks - List of returned blocks
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.namenode.GetBlocksResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetBlocksResponseProto)
        GetBlocksResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() {
        return internal_static_hadoop_hdfs_namenode_GetBlocksResponseProto_descriptor;
      }
      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() {
        return internal_static_hadoop_hdfs_namenode_GetBlocksResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(GetBlocksResponseProto.class, GetBlocksResponseProto.Builder.class);
      }
      // Construct using GetBlocksResponseProto.newBuilder()
      private Builder() { maybeForceBuilderInitialization(); }
      private Builder(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {
          getBlocksFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        if (blocksBuilder_ == null) { blocks_ = null; } else { blocksBuilder_.clear(); }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() {
        return internal_static_hadoop_hdfs_namenode_GetBlocksResponseProto_descriptor;
      }
      @java.lang.Override
      public GetBlocksResponseProto getDefaultInstanceForType() {
        return GetBlocksResponseProto.getDefaultInstance();
      }
      @java.lang.Override
      public GetBlocksResponseProto build() {
        GetBlocksResponseProto result = buildPartial();
        if (!result.isInitialized()) { throw newUninitializedMessageException(result); }
        return result;
      }
      @java.lang.Override
      public GetBlocksResponseProto buildPartial() {
        GetBlocksResponseProto result = new GetBlocksResponseProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          if (blocksBuilder_ == null) { result.blocks_ = blocks_; }
          else { result.blocks_ = blocksBuilder_.build(); }
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      @java.lang.Override
      public Builder clone() { return super.clone(); }
      @java.lang.Override
      public Builder setField(org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof GetBlocksResponseProto) {
          return mergeFrom((GetBlocksResponseProto) other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      public Builder mergeFrom(GetBlocksResponseProto other) {
        if (other == GetBlocksResponseProto.getDefaultInstance()) return this;
        if (other.hasBlocks()) { mergeBlocks(other.getBlocks()); }
        this.mergeUnknownFields(other.unknownFields);
        onChanged();
        return this;
      }
      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasBlocks()) return false;
        if (!getBlocks().isInitialized()) return false;
        return true;
      }
      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        GetBlocksResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (GetBlocksResponseProto) e.getUnfinishedMessage();
          throw e.unwrapIOException();
        } finally {
          if (parsedMessage != null) { mergeFrom(parsedMessage); }
        }
        return this;
      }

      private int bitField0_;
      private HdfsServerProtos.BlocksWithLocationsProto blocks_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          HdfsServerProtos.BlocksWithLocationsProto, HdfsServerProtos.BlocksWithLocationsProto.Builder,
          HdfsServerProtos.BlocksWithLocationsProtoOrBuilder> blocksBuilder_;
      /**
       * <pre>
       * List of blocks
       * </pre>
       *
       * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
       */
      public boolean hasBlocks() { return ((bitField0_ & 0x00000001) != 0); }
      /**
       * <pre>
       * List of blocks
       * </pre>
       *
       * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
       */
      public HdfsServerProtos.BlocksWithLocationsProto getBlocks() {
        if (blocksBuilder_ == null) {
          return blocks_ == null ? HdfsServerProtos.BlocksWithLocationsProto.getDefaultInstance() : blocks_;
        } else {
          return blocksBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * List of blocks
       * </pre>
       *
       * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
       */
      public Builder setBlocks(HdfsServerProtos.BlocksWithLocationsProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) { throw new NullPointerException(); }
          blocks_ = value;
          onChanged();
        } else {
          blocksBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <pre>
       * List of blocks
       * </pre>
       *
       * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
       */
      public Builder setBlocks(HdfsServerProtos.BlocksWithLocationsProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          blocks_ = builderForValue.build();
          onChanged();
        } else {
          blocksBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <pre>
       * List of blocks
       * </pre>
       *
       * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
       */
      public Builder mergeBlocks(HdfsServerProtos.BlocksWithLocationsProto value) {
        if (blocksBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) && blocks_ != null &&
              blocks_ != HdfsServerProtos.BlocksWithLocationsProto.getDefaultInstance()) {
            blocks_ = HdfsServerProtos.BlocksWithLocationsProto.newBuilder(blocks_).mergeFrom(value).buildPartial();
          } else {
            blocks_ = value;
          }
          onChanged();
        } else {
          blocksBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <pre>
       * List of blocks
       * </pre>
       *
       * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
       */
      public Builder clearBlocks() {
        if (blocksBuilder_ == null) {
          blocks_ = null;
          onChanged();
        } else {
          blocksBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <pre>
       * List of blocks
       * </pre>
       *
       * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
       */
      public HdfsServerProtos.BlocksWithLocationsProto.Builder getBlocksBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getBlocksFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * List of blocks
       * </pre>
       *
       * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
       */
      public HdfsServerProtos.BlocksWithLocationsProtoOrBuilder getBlocksOrBuilder() {
        if (blocksBuilder_ != null) {
          return blocksBuilder_.getMessageOrBuilder();
        } else {
          return blocks_ == null ? HdfsServerProtos.BlocksWithLocationsProto.getDefaultInstance() : blocks_;
        }
      }
      /**
       * <pre>
       * List of blocks
       * </pre>
       *
       * <code>required .hadoop.hdfs.BlocksWithLocationsProto blocks = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          HdfsServerProtos.BlocksWithLocationsProto, HdfsServerProtos.BlocksWithLocationsProto.Builder,
          HdfsServerProtos.BlocksWithLocationsProtoOrBuilder> getBlocksFieldBuilder() {
        if (blocksBuilder_ == null) {
          blocksBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              HdfsServerProtos.BlocksWithLocationsProto, HdfsServerProtos.BlocksWithLocationsProto.Builder,
              HdfsServerProtos.BlocksWithLocationsProtoOrBuilder>(getBlocks(), getParentForChildren(), isClean());
          blocks_ = null;
        }
        return blocksBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }
      @java.lang.Override
      public final Builder mergeUnknownFields(final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetBlocksResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetBlocksResponseProto)
    private static final GetBlocksResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new GetBlocksResponseProto();
    }
    public static GetBlocksResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; }

    @java.lang.Deprecated
    public static final org.apache.hadoop.thirdparty.protobuf.Parser<GetBlocksResponseProto> PARSER =
        new org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetBlocksResponseProto>() {
      @java.lang.Override
      public GetBlocksResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return new GetBlocksResponseProto(input, extensionRegistry);
      }
    };
    public static org.apache.hadoop.thirdparty.protobuf.Parser<GetBlocksResponseProto> parser() { return PARSER; }
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<GetBlocksResponseProto> getParserForType() { return PARSER; }
    @java.lang.Override
    public GetBlocksResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; }
  }

  public interface GetBlockKeysRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetBlockKeysRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * <pre>
   **
   * void request
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.namenode.GetBlockKeysRequestProto}
   */
  public static final class GetBlockKeysRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetBlockKeysRequestProto)
      GetBlockKeysRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use GetBlockKeysRequestProto.newBuilder() to construct.
    private GetBlockKeysRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private GetBlockKeysRequestProto() { }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() {
      return this.unknownFields;
    }

    private GetBlockKeysRequestProto(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      this();
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
          org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() {
      return internal_static_hadoop_hdfs_namenode_GetBlockKeysRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() {
      return internal_static_hadoop_hdfs_namenode_GetBlockKeysRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(GetBlockKeysRequestProto.class, GetBlockKeysRequestProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
        throws java.io.IOException {
      unknownFields.writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;
      size = 0;
      size += unknownFields.getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) return true;
      if (!(obj instanceof GetBlockKeysRequestProto)) return super.equals(obj);
      GetBlockKeysRequestProto other = (GetBlockKeysRequestProto) obj;
      if (!unknownFields.equals(other.unknownFields)) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) return memoizedHashCode;
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + unknownFields.hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static GetBlockKeysRequestProto parseFrom(java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static GetBlockKeysRequestProto parseFrom(java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static GetBlockKeysRequestProto parseFrom(org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static GetBlockKeysRequestProto parseFrom(org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static GetBlockKeysRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static GetBlockKeysRequestProto parseFrom(byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static GetBlockKeysRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
    }
    public static GetBlockKeysRequestProto parseFrom(java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static GetBlockKeysRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
    }
    public static GetBlockKeysRequestProto parseDelimitedFrom(java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public
static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * void request
     * 
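     *
     * Illustrative sketch, not part of the generated source: with no
     * fields to set, building yields a message equal to the default
     * instance.
     *
     *   GetBlockKeysRequestProto req =
     *       GetBlockKeysRequestProto.newBuilder().build();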
* * Protobuf type {@code hadoop.hdfs.namenode.GetBlockKeysRequestProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetBlockKeysRequestProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetBlockKeysRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetBlockKeysRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetBlockKeysRequestProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetBlockKeysRequestProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetBlockKeysRequestProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public GetBlockKeysRequestProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new GetBlockKeysRequestProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface GetBlockKeysResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetBlockKeysResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ boolean hasKeys(); /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys(); /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder(); } /** *
   **
   * keys - Information about block keys at the active namenode
   * 
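   *
   * Illustrative sketch, not part of the generated source: {@code keys}
   * is optional, so presence should be checked before reading; the
   * variable {@code resp} stands in for a response obtained elsewhere.
   *
   *   if (resp.hasKeys()) {
   *     HdfsServerProtos.ExportedBlockKeysProto keys = resp.getKeys();
   *   }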
* * Protobuf type {@code hadoop.hdfs.namenode.GetBlockKeysResponseProto} */ public static final class GetBlockKeysResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetBlockKeysResponseProto) GetBlockKeysResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use GetBlockKeysResponseProto.newBuilder() to construct. private GetBlockKeysResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private GetBlockKeysResponseProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetBlockKeysResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) != 0)) { subBuilder = keys_.toBuilder(); } keys_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(keys_); keys_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetBlockKeysResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetBlockKeysResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.Builder.class); } private int bitField0_; public static final int KEYS_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto keys_; /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ public boolean hasKeys() { return ((bitField0_ & 0x00000001) != 0); } /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys() { return keys_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance() : keys_; } /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() { return keys_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance() : keys_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (hasKeys()) { if (!getKeys().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getKeys()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getKeys()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto) obj; if (hasKeys() != other.hasKeys()) return false; if (hasKeys()) { if (!getKeys() .equals(other.getKeys())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasKeys()) { hash = (37 * hash) + KEYS_FIELD_NUMBER; hash = (53 * hash) + getKeys().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * keys - Information about block keys at the active namenode
     * 
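     *
     * Illustrative sketch, not part of the generated source, assuming an
     * ExportedBlockKeysProto value named {@code exportedKeys} is in scope:
     *
     *   GetBlockKeysResponseProto resp =
     *       GetBlockKeysResponseProto.newBuilder()
     *           .setKeys(exportedKeys)
     *           .build();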
* * Protobuf type {@code hadoop.hdfs.namenode.GetBlockKeysResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetBlockKeysResponseProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetBlockKeysResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetBlockKeysResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getKeysFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (keysBuilder_ == null) { keys_ = null; } else { keysBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetBlockKeysResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { if (keysBuilder_ == null) { result.keys_ = keys_; } else { result.keys_ = keysBuilder_.build(); } to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, 
value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance()) return this; if (other.hasKeys()) { mergeKeys(other.getKeys()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (hasKeys()) { if (!getKeys().isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto keys_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder> keysBuilder_; /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ public boolean hasKeys() { return ((bitField0_ & 0x00000001) != 0); } /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys() { if (keysBuilder_ == null) { return keys_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance() : keys_; } else { return keysBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ public Builder setKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto value) { if (keysBuilder_ == null) { if (value == null) { throw new NullPointerException(); } keys_ = value; onChanged(); } else { keysBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ public Builder setKeys( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder builderForValue) { if (keysBuilder_ == null) { keys_ = builderForValue.build(); onChanged(); } else { keysBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ public Builder mergeKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto value) { if (keysBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && keys_ != null && keys_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance()) { keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.newBuilder(keys_).mergeFrom(value).buildPartial(); } else { keys_ = value; } onChanged(); } else { keysBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ public Builder clearKeys() { if (keysBuilder_ == null) { keys_ = null; onChanged(); } else { keysBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder getKeysBuilder() { bitField0_ |= 0x00000001; onChanged(); return getKeysFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() { if (keysBuilder_ != null) { return keysBuilder_.getMessageOrBuilder(); } else { return keys_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance() : keys_; } } /** * optional .hadoop.hdfs.ExportedBlockKeysProto keys = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder> getKeysFieldBuilder() { if (keysBuilder_ == null) { keysBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder>( getKeys(), getParentForChildren(), isClean()); keys_ = null; } return keysBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetBlockKeysResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetBlockKeysResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public GetBlockKeysResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new GetBlockKeysResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface GetTransactionIdRequestProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetTransactionIdRequestProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { } /** *
   **
   * void request
   * 
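   *
   * Illustrative sketch, not part of the generated source: like the other
   * void requests here, the message can travel in length-delimited form;
   * {@code out} and {@code in} are assumed stream variables.
   *
   *   GetTransactionIdRequestProto.getDefaultInstance().writeDelimitedTo(out);
   *   GetTransactionIdRequestProto req =
   *       GetTransactionIdRequestProto.parseDelimitedFrom(in);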
* * Protobuf type {@code hadoop.hdfs.namenode.GetTransactionIdRequestProto} */ public static final class GetTransactionIdRequestProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetTransactionIdRequestProto) GetTransactionIdRequestProtoOrBuilder { private static final long serialVersionUID = 0L; // Use GetTransactionIdRequestProto.newBuilder() to construct. private GetTransactionIdRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private GetTransactionIdRequestProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetTransactionIdRequestProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetTransactionIdRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetTransactionIdRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.Builder.class); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto)) { return super.equals(obj); } 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto) obj; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * void request
     * 
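     *
     * Illustrative sketch, not part of the generated source: a freshly
     * built message is value-equal to the shared default instance.
     *
     *   GetTransactionIdRequestProto a =
     *       GetTransactionIdRequestProto.newBuilder().build();
     *   boolean same =
     *       a.equals(GetTransactionIdRequestProto.getDefaultInstance());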
* * Protobuf type {@code hadoop.hdfs.namenode.GetTransactionIdRequestProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetTransactionIdRequestProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetTransactionIdRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetTransactionIdRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetTransactionIdRequestProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder 
setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetTransactionIdRequestProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetTransactionIdRequestProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public GetTransactionIdRequestProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new GetTransactionIdRequestProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser 
getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface GetTransactionIdResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetTransactionIdResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
     * Transaction ID
     *
     *
     * required uint64 txId = 1;
     */
    boolean hasTxId();

    /**
     *
     * Transaction ID
     *
     *
     * required uint64 txId = 1;
     */
    long getTxId();
  }
  /**
   *
   **
   * txId - Transaction ID of the most recently persisted edit log record
   * 
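   *
   * Illustrative sketch, not part of the generated source, assuming a
   * serialized message in {@code byte[] wire}: {@code txId} is required,
   * so parsing fails with InvalidProtocolBufferException if it is absent.
   *
   *   GetTransactionIdResponseProto resp =
   *       GetTransactionIdResponseProto.parseFrom(wire);
   *   long txId = resp.getTxId();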
* * Protobuf type {@code hadoop.hdfs.namenode.GetTransactionIdResponseProto} */ public static final class GetTransactionIdResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetTransactionIdResponseProto) GetTransactionIdResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use GetTransactionIdResponseProto.newBuilder() to construct. private GetTransactionIdResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private GetTransactionIdResponseProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetTransactionIdResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; txId_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetTransactionIdResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetTransactionIdResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.Builder.class); } private int bitField0_; public static final int TXID_FIELD_NUMBER = 1; private long txId_; /** *
     * Transaction ID
     *
     *
     * required uint64 txId = 1;
     */
    public boolean hasTxId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     *
     * Transaction ID
     *
* * required uint64 txId = 1; */ public long getTxId() { return txId_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasTxId()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, txId_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, txId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto) obj; if (hasTxId() != other.hasTxId()) return false; if (hasTxId()) { if (getTxId() != other.getTxId()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasTxId()) { hash = (37 * hash) + TXID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getTxId()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
      * <pre>
      **
      * txId - Transaction ID of the most recently persisted edit log record
      * </pre>
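      *
      * A minimal usage sketch (editor's note, not part of the generated file),
      * assuming only the generated API visible in this class:
      * <pre>
      * GetTransactionIdResponseProto resp =
      *     GetTransactionIdResponseProto.newBuilder()
      *         .setTxId(42L)   // txId is required; build() would throw if it were unset
      *         .build();
      * byte[] wire = resp.toByteArray();
      * GetTransactionIdResponseProto parsed =
      *     GetTransactionIdResponseProto.parseFrom(wire);
      * assert parsed.hasTxId() && parsed.getTxId() == 42L;
      * </pre>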
* * Protobuf type {@code hadoop.hdfs.namenode.GetTransactionIdResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetTransactionIdResponseProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetTransactionIdResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetTransactionIdResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); txId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetTransactionIdResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.txId_ = txId_; to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance()) return this; if (other.hasTxId()) { setTxId(other.getTxId()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasTxId()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long txId_ ; /** *
        * <pre>
        * Transaction ID
        * </pre>
* * required uint64 txId = 1; */ public boolean hasTxId() { return ((bitField0_ & 0x00000001) != 0); } /** *
        * <pre>
        * Transaction ID
        * </pre>
* * required uint64 txId = 1; */ public long getTxId() { return txId_; } /** *
        * <pre>
        * Transaction ID
        * </pre>
* * required uint64 txId = 1; */ public Builder setTxId(long value) { bitField0_ |= 0x00000001; txId_ = value; onChanged(); return this; } /** *
        * <pre>
        * Transaction ID
        * </pre>
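        *
        * Presence-bit sketch (editor's note, not part of the generated file):
        * clearTxId() clears the hasTxId() bit and resets the field to its
        * default of 0.
        * <pre>
        * GetTransactionIdResponseProto.Builder b =
        *     GetTransactionIdResponseProto.newBuilder();
        * b.setTxId(7L);    // hasTxId() == true
        * b.clearTxId();    // hasTxId() == false, getTxId() == 0L
        * </pre>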
* * required uint64 txId = 1; */ public Builder clearTxId() { bitField0_ = (bitField0_ & ~0x00000001); txId_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetTransactionIdResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetTransactionIdResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public GetTransactionIdResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new GetTransactionIdResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface RollEditLogRequestProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.RollEditLogRequestProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { } /** *
    * <pre>
    **
    * void request
    * </pre>
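    *
    * Because the request carries no fields, callers can reuse the shared
    * default instance rather than building a fresh object; a sketch
    * (editor's note, not part of the generated file):
    * <pre>
    * RollEditLogRequestProto req = RollEditLogRequestProto.getDefaultInstance();
    * assert req.getSerializedSize() == 0;  // nothing on the wire but unknown fields
    * </pre>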
* * Protobuf type {@code hadoop.hdfs.namenode.RollEditLogRequestProto} */ public static final class RollEditLogRequestProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.RollEditLogRequestProto) RollEditLogRequestProtoOrBuilder { private static final long serialVersionUID = 0L; // Use RollEditLogRequestProto.newBuilder() to construct. private RollEditLogRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private RollEditLogRequestProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RollEditLogRequestProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RollEditLogRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RollEditLogRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.Builder.class); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto other = 
(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto) obj; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
      * <pre>
      **
      * void request
      * </pre>
* * Protobuf type {@code hadoop.hdfs.namenode.RollEditLogRequestProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.RollEditLogRequestProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RollEditLogRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RollEditLogRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RollEditLogRequestProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor 
field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.RollEditLogRequestProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.RollEditLogRequestProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public RollEditLogRequestProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new RollEditLogRequestProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto 
getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface RollEditLogResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.RollEditLogResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ boolean hasSignature(); /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto getSignature(); /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder(); } /** *
    * <pre>
    **
    * signature - A unique token to identify checkpoint transaction
    * </pre>
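    *
    * A parsing sketch (editor's note, not part of the generated file);
    * {@code wireBytes} is a placeholder for previously serialized bytes.
    * The signature field is required, so isInitialized() stays false until
    * it is present and itself initialized.
    * <pre>
    * RollEditLogResponseProto resp = RollEditLogResponseProto.parseFrom(wireBytes);
    * if (resp.hasSignature()) {
    *   HdfsServerProtos.CheckpointSignatureProto sig = resp.getSignature();
    *   // ... use the checkpoint signature ...
    * }
    * </pre>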
* * Protobuf type {@code hadoop.hdfs.namenode.RollEditLogResponseProto} */ public static final class RollEditLogResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.RollEditLogResponseProto) RollEditLogResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use RollEditLogResponseProto.newBuilder() to construct. private RollEditLogResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private RollEditLogResponseProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RollEditLogResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) != 0)) { subBuilder = signature_.toBuilder(); } signature_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(signature_); signature_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RollEditLogResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RollEditLogResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.Builder.class); } private int bitField0_; public static final int SIGNATURE_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto signature_; /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ public boolean hasSignature() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto 
getSignature() { return signature_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance() : signature_; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { return signature_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance() : signature_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSignature()) { memoizedIsInitialized = 0; return false; } if (!getSignature().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getSignature()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getSignature()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto) obj; if (hasSignature() != other.hasSignature()) return false; if (hasSignature()) { if (!getSignature() .equals(other.getSignature())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSignature()) { hash = (37 * hash) + SIGNATURE_FIELD_NUMBER; hash = (53 * hash) + getSignature().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString 
data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
      * <pre>
      **
      * signature - A unique token to identify checkpoint transaction
      * </pre>
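      *
      * Building sketch (editor's note, not part of the generated file); the
      * nested message can be passed in fully built, as below, or edited in
      * place through getSignatureBuilder(). buildPartial() is used here only
      * to sidestep the required-field check on the nested signature.
      * <pre>
      * RollEditLogResponseProto resp = RollEditLogResponseProto.newBuilder()
      *     .setSignature(HdfsServerProtos.CheckpointSignatureProto.newBuilder()
      *         // ... populate the signature's own fields here ...
      *         .buildPartial())
      *     .buildPartial();
      * </pre>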
* * Protobuf type {@code hadoop.hdfs.namenode.RollEditLogResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.RollEditLogResponseProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RollEditLogResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RollEditLogResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getSignatureFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (signatureBuilder_ == null) { signature_ = null; } else { signatureBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RollEditLogResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { if (signatureBuilder_ == null) { result.signature_ = signature_; } else { result.signature_ = signatureBuilder_.build(); } to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { 
return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance()) return this; if (other.hasSignature()) { mergeSignature(other.getSignature()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasSignature()) { return false; } if (!getSignature().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto signature_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder> signatureBuilder_; /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ public boolean hasSignature() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto getSignature() { if (signatureBuilder_ == null) { return signature_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance() : signature_; } else { return signatureBuilder_.getMessage(); } } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ public Builder setSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto value) { if (signatureBuilder_ == null) { if (value == null) { throw new NullPointerException(); } signature_ = value; onChanged(); } else { signatureBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ public Builder setSignature( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder builderForValue) { if (signatureBuilder_ == null) { signature_ = builderForValue.build(); onChanged(); } else { signatureBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ public Builder mergeSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto value) { if (signatureBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && signature_ != null && signature_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance()) { signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.newBuilder(signature_).mergeFrom(value).buildPartial(); } else { signature_ = value; } onChanged(); } else { signatureBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ public Builder clearSignature() { if (signatureBuilder_ == null) { signature_ = null; onChanged(); } else { signatureBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder getSignatureBuilder() { bitField0_ |= 0x00000001; onChanged(); return getSignatureFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { if (signatureBuilder_ != null) { return signatureBuilder_.getMessageOrBuilder(); } else { return signature_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance() : signature_; } } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder> getSignatureFieldBuilder() { if (signatureBuilder_ == null) { signatureBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder>( getSignature(), getParentForChildren(), isClean()); signature_ = null; } return signatureBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.RollEditLogResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.RollEditLogResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public RollEditLogResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new RollEditLogResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface GetMostRecentCheckpointTxIdRequestProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdRequestProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { } /** *
    * <pre>
    **
    * void request
    * </pre>
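    *
    * Stream-framing sketch (editor's note, not part of the generated file);
    * {@code out} and {@code in} are placeholders for an OutputStream and
    * InputStream pair:
    * <pre>
    * GetMostRecentCheckpointTxIdRequestProto req =
    *     GetMostRecentCheckpointTxIdRequestProto.getDefaultInstance();
    * req.writeDelimitedTo(out);   // writes a varint length prefix, then the body
    * GetMostRecentCheckpointTxIdRequestProto back =
    *     GetMostRecentCheckpointTxIdRequestProto.parseDelimitedFrom(in);
    * </pre>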
* * Protobuf type {@code hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdRequestProto} */ public static final class GetMostRecentCheckpointTxIdRequestProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdRequestProto) GetMostRecentCheckpointTxIdRequestProtoOrBuilder { private static final long serialVersionUID = 0L; // Use GetMostRecentCheckpointTxIdRequestProto.newBuilder() to construct. private GetMostRecentCheckpointTxIdRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private GetMostRecentCheckpointTxIdRequestProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetMostRecentCheckpointTxIdRequestProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto.Builder.class); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto) obj; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } 
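  // Editor's note: reconstructed as a sketch from the generated code and the
  // field comments above (not the authoritative source), the messages in this
  // part of NamenodeProtocol.proto look roughly like:
  //
  //   message GetTransactionIdResponseProto {
  //     required uint64 txId = 1;  // most recently persisted edit log transaction
  //   }
  //   message RollEditLogRequestProto {  // void request
  //   }
  //   message RollEditLogResponseProto {
  //     required CheckpointSignatureProto signature = 1;
  //   }
  //   message GetMostRecentCheckpointTxIdRequestProto {  // void request
  //   }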
public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
      * <pre>
      **
      * void request
      * </pre>
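      *
      * A minimal usage sketch (illustrative; not emitted by protoc): the
      * request message has no fields, so the builder is built empty.
      * <pre>
      *   GetMostRecentCheckpointTxIdRequestProto req =
      *       GetMostRecentCheckpointTxIdRequestProto.newBuilder().build();
      * </pre>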
* * Protobuf type {@code hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdRequestProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdRequestProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdRequestProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override 
public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdRequestProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdRequestProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public GetMostRecentCheckpointTxIdRequestProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new GetMostRecentCheckpointTxIdRequestProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface GetMostRecentCheckpointTxIdResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint64 txId = 1; */ boolean hasTxId(); /** * required uint64 txId = 1; */ long getTxId(); } /** * Protobuf type {@code hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdResponseProto} */ public static final class GetMostRecentCheckpointTxIdResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdResponseProto) GetMostRecentCheckpointTxIdResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use GetMostRecentCheckpointTxIdResponseProto.newBuilder() to construct. private GetMostRecentCheckpointTxIdResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private GetMostRecentCheckpointTxIdResponseProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetMostRecentCheckpointTxIdResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; txId_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto.Builder.class); } private int bitField0_; public static final int TXID_FIELD_NUMBER = 1; private long txId_; /** * required uint64 txId = 1; */ public boolean hasTxId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 txId = 1; */ public long getTxId() { return txId_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasTxId()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, txId_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, txId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto) obj; if (hasTxId() != other.hasTxId()) return false; if (hasTxId()) { if (getTxId() != other.getTxId()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasTxId()) { hash = (37 * hash) + TXID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getTxId()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdResponseProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); txId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.txId_ = txId_; to_bitField0_ |= 
0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto.getDefaultInstance()) return this; if (other.hasTxId()) { setTxId(other.getTxId()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasTxId()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long txId_ ; /** * required uint64 txId = 1; */ public boolean hasTxId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 txId = 1; */ public long getTxId() { return txId_; } /** * required uint64 txId = 1; */ public Builder setTxId(long value) { bitField0_ |= 0x00000001; txId_ = value; onChanged(); return this; } /** * required uint64 txId = 1; */ public Builder clearTxId() { bitField0_ = (bitField0_ & ~0x00000001); txId_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet 
unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public GetMostRecentCheckpointTxIdResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new GetMostRecentCheckpointTxIdResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ErrorReportRequestProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.ErrorReportRequestProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
     * <pre>
     * Registration info
     * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ boolean hasRegistration(); /** *
     * <pre>
     * Registration info
     * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration(); /** *
     * <pre>
     * Registration info
     * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); /** *
     * <pre>
     * Error code
     * </pre>
* * required uint32 errorCode = 2; */ boolean hasErrorCode(); /** *
     * <pre>
     * Error code
     * </pre>
* * required uint32 errorCode = 2; */ int getErrorCode(); /** *
     * <pre>
     * Error message
     * </pre>
* * required string msg = 3; */ boolean hasMsg(); /** *
     * <pre>
     * Error message
     * </pre>
* * required string msg = 3; */ java.lang.String getMsg(); /** *
     * <pre>
     * Error message
     * </pre>
* * required string msg = 3; */ org.apache.hadoop.thirdparty.protobuf.ByteString getMsgBytes(); } /** *
   * <pre>
   **
   * registration - Namenode reporting the error
   * errorCode - error code indicating the error
   * msg - Free text description of the error
   * </pre>
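   *
   * An illustrative sketch (not generated by protoc; {@code registration}
   * below stands for a previously obtained NamenodeRegistrationProto):
   * <pre>
   *   ErrorReportRequestProto req = ErrorReportRequestProto.newBuilder()
   *       .setRegistration(registration)          // required
   *       .setErrorCode(1)                        // required
   *       .setMsg("checkpoint transfer failed")   // required
   *       .build();
   * </pre>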
* * Protobuf type {@code hadoop.hdfs.namenode.ErrorReportRequestProto} */ public static final class ErrorReportRequestProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.ErrorReportRequestProto) ErrorReportRequestProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ErrorReportRequestProto.newBuilder() to construct. private ErrorReportRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ErrorReportRequestProto() { msg_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ErrorReportRequestProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) != 0)) { subBuilder = registration_.toBuilder(); } registration_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(registration_); registration_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; errorCode_ = input.readUInt32(); break; } case 26: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000004; msg_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_ErrorReportRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_ErrorReportRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.Builder.class); } private int bitField0_; public static final int REGISTRATION_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto registration_; /** *
     * <pre>
     * Registration info
     * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public boolean hasRegistration() { return ((bitField0_ & 0x00000001) != 0); } /** *
     * <pre>
     * Registration info
     * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration() { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } /** *
     * <pre>
     * Registration info
     * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } public static final int ERRORCODE_FIELD_NUMBER = 2; private int errorCode_; /** *
     * <pre>
     * Error code
     * </pre>
* * required uint32 errorCode = 2; */ public boolean hasErrorCode() { return ((bitField0_ & 0x00000002) != 0); } /** *
     * <pre>
     * Error code
     * </pre>
* * required uint32 errorCode = 2; */ public int getErrorCode() { return errorCode_; } public static final int MSG_FIELD_NUMBER = 3; private volatile java.lang.Object msg_; /** *
     * <pre>
     * Error message
     * </pre>
* * required string msg = 3; */ public boolean hasMsg() { return ((bitField0_ & 0x00000004) != 0); } /** *
     * <pre>
     * Error message
     * </pre>
* * required string msg = 3; */ public java.lang.String getMsg() { java.lang.Object ref = msg_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { msg_ = s; } return s; } } /** *
     * <pre>
     * Error message
     * </pre>
* * required string msg = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getMsgBytes() { java.lang.Object ref = msg_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); msg_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasRegistration()) { memoizedIsInitialized = 0; return false; } if (!hasErrorCode()) { memoizedIsInitialized = 0; return false; } if (!hasMsg()) { memoizedIsInitialized = 0; return false; } if (!getRegistration().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getRegistration()); } if (((bitField0_ & 0x00000002) != 0)) { output.writeUInt32(2, errorCode_); } if (((bitField0_ & 0x00000004) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, msg_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getRegistration()); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt32Size(2, errorCode_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, msg_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto) obj; if (hasRegistration() != other.hasRegistration()) return false; if (hasRegistration()) { if (!getRegistration() .equals(other.getRegistration())) return false; } if (hasErrorCode() != other.hasErrorCode()) return false; if (hasErrorCode()) { if (getErrorCode() != other.getErrorCode()) return false; } if (hasMsg() != other.hasMsg()) return false; if (hasMsg()) { if (!getMsg() .equals(other.getMsg())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasRegistration()) { hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; hash = (53 * hash) + getRegistration().hashCode(); } if (hasErrorCode()) { hash = (37 * hash) + ERRORCODE_FIELD_NUMBER; hash = (53 * hash) + getErrorCode(); } if (hasMsg()) { hash = (37 * hash) + MSG_FIELD_NUMBER; hash = (53 * hash) + getMsg().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     * <pre>
     **
     * registration - Namenode reporting the error
     * errorCode - error code indicating the error
     * msg - Free text description of the error
     * </pre>
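     *
     * Illustrative note: all three fields are {@code required}, so
     * {@code build()} throws if any is unset; {@code isInitialized()} can be
     * checked first.
     * <pre>
     *   ErrorReportRequestProto.Builder b = ErrorReportRequestProto.newBuilder()
     *       .setErrorCode(2)
     *       .setMsg("edit log roll failed");
     *   boolean ready = b.isInitialized();  // false until registration is set
     * </pre>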
* * Protobuf type {@code hadoop.hdfs.namenode.ErrorReportRequestProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.ErrorReportRequestProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_ErrorReportRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_ErrorReportRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getRegistrationFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (registrationBuilder_ == null) { registration_ = null; } else { registrationBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); errorCode_ = 0; bitField0_ = (bitField0_ & ~0x00000002); msg_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_ErrorReportRequestProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { if (registrationBuilder_ == null) { result.registration_ = registration_; } else { result.registration_ = registrationBuilder_.build(); } to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { result.errorCode_ = errorCode_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000004) != 0)) { 
to_bitField0_ |= 0x00000004; } result.msg_ = msg_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance()) return this; if (other.hasRegistration()) { mergeRegistration(other.getRegistration()); } if (other.hasErrorCode()) { setErrorCode(other.getErrorCode()); } if (other.hasMsg()) { bitField0_ |= 0x00000004; msg_ = other.msg_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasRegistration()) { return false; } if (!hasErrorCode()) { return false; } if (!hasMsg()) { return false; } if (!getRegistration().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto registration_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; /** *
       * <pre>
       * Registration info
       * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public boolean hasRegistration() { return ((bitField0_ & 0x00000001) != 0); } /** *
       * <pre>
       * Registration info
       * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration() { if (registrationBuilder_ == null) { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } else { return registrationBuilder_.getMessage(); } } /** *
       * <pre>
       * Registration info
       * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto value) { if (registrationBuilder_ == null) { if (value == null) { throw new NullPointerException(); } registration_ = value; onChanged(); } else { registrationBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** *
       * <pre>
       * Registration info
       * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder setRegistration( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder builderForValue) { if (registrationBuilder_ == null) { registration_ = builderForValue.build(); onChanged(); } else { registrationBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** *
       * <pre>
       * Registration info
       * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto value) { if (registrationBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && registration_ != null && registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance()) { registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); } else { registration_ = value; } onChanged(); } else { registrationBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** *
       * <pre>
       * Registration info
       * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder clearRegistration() { if (registrationBuilder_ == null) { registration_ = null; onChanged(); } else { registrationBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** *
       * <pre>
       * Registration info
       * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { bitField0_ |= 0x00000001; onChanged(); return getRegistrationFieldBuilder().getBuilder(); } /** *
       * <pre>
       * Registration info
       * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { if (registrationBuilder_ != null) { return registrationBuilder_.getMessageOrBuilder(); } else { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } } /** *
       * <pre>
       * Registration info
       * </pre>
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder> getRegistrationFieldBuilder() { if (registrationBuilder_ == null) { registrationBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder>( getRegistration(), getParentForChildren(), isClean()); registration_ = null; } return registrationBuilder_; } private int errorCode_ ; /** *
       * <pre>
       * Error code
       * </pre>
* * required uint32 errorCode = 2; */ public boolean hasErrorCode() { return ((bitField0_ & 0x00000002) != 0); } /** *
       * <pre>
       * Error code
       * </pre>
* * required uint32 errorCode = 2; */ public int getErrorCode() { return errorCode_; } /** *
       * <pre>
       * Error code
       * </pre>
* * required uint32 errorCode = 2; */ public Builder setErrorCode(int value) { bitField0_ |= 0x00000002; errorCode_ = value; onChanged(); return this; } /** *
       * <pre>
       * Error code
       * </pre>
* * required uint32 errorCode = 2; */ public Builder clearErrorCode() { bitField0_ = (bitField0_ & ~0x00000002); errorCode_ = 0; onChanged(); return this; } private java.lang.Object msg_ = ""; /** *
       * <pre>
       * Error message
       * </pre>
* * required string msg = 3; */ public boolean hasMsg() { return ((bitField0_ & 0x00000004) != 0); } /** *
       * <pre>
       * Error message
       * </pre>
* * required string msg = 3; */ public java.lang.String getMsg() { java.lang.Object ref = msg_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { msg_ = s; } return s; } else { return (java.lang.String) ref; } } /** *
       * <pre>
       * Error message
       * </pre>
* * required string msg = 3; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getMsgBytes() { java.lang.Object ref = msg_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); msg_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** *
       * <pre>
       * Error message
       * </pre>
* * required string msg = 3; */ public Builder setMsg( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; msg_ = value; onChanged(); return this; } /** *
       * <pre>
       * Error message
       * </pre>
* * required string msg = 3; */ public Builder clearMsg() { bitField0_ = (bitField0_ & ~0x00000004); msg_ = getDefaultInstance().getMsg(); onChanged(); return this; } /** *
       * Error message
       * 
* * required string msg = 3; */ public Builder setMsgBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; msg_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.ErrorReportRequestProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.ErrorReportRequestProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ErrorReportRequestProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new ErrorReportRequestProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface ErrorReportResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.ErrorReportResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { } /** *
   **
   * void response
   * 
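     *
     * Editor's sketch (not generated code): with no declared fields, a round
     * trip carries nothing but unknown fields, if any:
     *
     *   ErrorReportResponseProto resp = ErrorReportResponseProto.newBuilder().build();
     *   byte[] wire = resp.toByteArray();   // empty for a field-less message
     *   ErrorReportResponseProto parsed = ErrorReportResponseProto.parseFrom(wire);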
* * Protobuf type {@code hadoop.hdfs.namenode.ErrorReportResponseProto} */ public static final class ErrorReportResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.ErrorReportResponseProto) ErrorReportResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use ErrorReportResponseProto.newBuilder() to construct. private ErrorReportResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private ErrorReportResponseProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ErrorReportResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_ErrorReportResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_ErrorReportResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.Builder.class); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto other = 
(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto) obj; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public 
static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * void response
     * 
* * Protobuf type {@code hadoop.hdfs.namenode.ErrorReportResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.ErrorReportResponseProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_ErrorReportResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_ErrorReportResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_ErrorReportResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.ErrorReportResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.ErrorReportResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public ErrorReportResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new ErrorReportResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface RegisterRequestProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.RegisterRequestProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ boolean hasRegistration(); /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration(); /** *
     * Registration info
     * 
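      *
      * Editor's sketch (not generated code): accepting the OrBuilder interface
      * lets one helper read the nested registration from a built message or a
      * live Builder without forcing a copy:
      *
      *   static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder
      *       registrationOf(RegisterRequestProtoOrBuilder r) {
      *     return r.getRegistrationOrBuilder();
      *   }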
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); } /** *
   **
    * registration - Information about the namenode registering with the primary namenode
   * 
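     *
     * Illustrative usage (editor's sketch): isInitialized() stays false until
     * the required registration is set; `registration` is a placeholder value
     * assumed to be fully initialized itself:
     *
     *   RegisterRequestProto.Builder b = RegisterRequestProto.newBuilder();
     *   boolean ready = b.isInitialized();     // false: registration is required
     *   b.setRegistration(registration);
     *   RegisterRequestProto req = b.build();  // now succeeds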
* * Protobuf type {@code hadoop.hdfs.namenode.RegisterRequestProto} */ public static final class RegisterRequestProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.RegisterRequestProto) RegisterRequestProtoOrBuilder { private static final long serialVersionUID = 0L; // Use RegisterRequestProto.newBuilder() to construct. private RegisterRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private RegisterRequestProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RegisterRequestProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) != 0)) { subBuilder = registration_.toBuilder(); } registration_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(registration_); registration_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RegisterRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RegisterRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.Builder.class); } private int bitField0_; public static final int REGISTRATION_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto registration_; /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public boolean hasRegistration() { return ((bitField0_ & 0x00000001) != 0); } /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration() { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasRegistration()) { memoizedIsInitialized = 0; return false; } if (!getRegistration().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getRegistration()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getRegistration()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto) obj; if (hasRegistration() != other.hasRegistration()) return false; if (hasRegistration()) { if (!getRegistration() .equals(other.getRegistration())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasRegistration()) { hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; hash = (53 * hash) + getRegistration().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
      * registration - Information about the namenode registering with the primary namenode
     * 
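      *
      * Editor's sketch (not generated code): Builder.mergeFrom(...) parses one
      * whole message from a stream and folds it into any state already set;
      * `codedIn` and `registry` are assumed to be in scope:
      *
      *   RegisterRequestProto.Builder b = RegisterRequestProto.newBuilder();
      *   b.mergeFrom(codedIn, registry);   // CodedInputStream + ExtensionRegistryLite
      *   RegisterRequestProto req = b.build();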
* * Protobuf type {@code hadoop.hdfs.namenode.RegisterRequestProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.RegisterRequestProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RegisterRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RegisterRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getRegistrationFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (registrationBuilder_ == null) { registration_ = null; } else { registrationBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RegisterRequestProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { if (registrationBuilder_ == null) { result.registration_ = registration_; } else { result.registration_ = registrationBuilder_.build(); } to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } 
@java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDefaultInstance()) return this; if (other.hasRegistration()) { mergeRegistration(other.getRegistration()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasRegistration()) { return false; } if (!getRegistration().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto registration_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public boolean hasRegistration() { return ((bitField0_ & 0x00000001) != 0); } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration() { if (registrationBuilder_ == null) { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } else { return registrationBuilder_.getMessage(); } } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto value) { if (registrationBuilder_ == null) { if (value == null) { throw new NullPointerException(); } registration_ = value; onChanged(); } else { registrationBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder setRegistration( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder builderForValue) { if (registrationBuilder_ == null) { registration_ = builderForValue.build(); onChanged(); } else { registrationBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto value) { if (registrationBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && registration_ != null && registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance()) { registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); } else { registration_ = value; } onChanged(); } else { registrationBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder clearRegistration() { if (registrationBuilder_ == null) { registration_ = null; onChanged(); } else { registrationBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** *
       * Registration info
       * 
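        *
        * Editor's sketch (not generated code): getRegistrationBuilder() flips
        * the field to builder-backed storage so the nested message can be
        * edited in place before build():
        *
        *   RegisterRequestProto.Builder b = RegisterRequestProto.newBuilder();
        *   b.getRegistrationBuilder()
        *       .mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos
        *           .NamenodeRegistrationProto.getDefaultInstance());
        *   // hasRegistration() is already true here, before build()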
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { bitField0_ |= 0x00000001; onChanged(); return getRegistrationFieldBuilder().getBuilder(); } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { if (registrationBuilder_ != null) { return registrationBuilder_.getMessageOrBuilder(); } else { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder> getRegistrationFieldBuilder() { if (registrationBuilder_ == null) { registrationBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder>( getRegistration(), getParentForChildren(), isClean()); registration_ = null; } return registrationBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.RegisterRequestProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.RegisterRequestProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public RegisterRequestProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new RegisterRequestProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface RegisterResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.RegisterResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ boolean hasRegistration(); /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration(); /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); } /** *
   **
    * registration - Updated registration information of the newly registered
    *                namenode.
   * 
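     *
     * Illustrative usage (editor's sketch): a caller usually parses the reply
     * off the wire and adopts the updated registration; `in` is an assumed
     * java.io.InputStream:
     *
     *   RegisterResponseProto resp = RegisterResponseProto.parseFrom(in);
     *   if (resp.hasRegistration()) {
     *     org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto
     *         updated = resp.getRegistration();
     *   }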
* * Protobuf type {@code hadoop.hdfs.namenode.RegisterResponseProto} */ public static final class RegisterResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.RegisterResponseProto) RegisterResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use RegisterResponseProto.newBuilder() to construct. private RegisterResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private RegisterResponseProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RegisterResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) != 0)) { subBuilder = registration_.toBuilder(); } registration_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(registration_); registration_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RegisterResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RegisterResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.Builder.class); } private int bitField0_; public static final int REGISTRATION_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto registration_; /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public boolean hasRegistration() { return ((bitField0_ & 0x00000001) != 0); } /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration() { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasRegistration()) { memoizedIsInitialized = 0; return false; } if (!getRegistration().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getRegistration()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getRegistration()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto) obj; if (hasRegistration() != other.hasRegistration()) return false; if (hasRegistration()) { if (!getRegistration() .equals(other.getRegistration())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasRegistration()) { hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; hash = (53 * hash) + getRegistration().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
      * registration - Updated registration information of the newly registered
      *                namenode.
     * 
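      *
      * Editor's sketch (not generated code): toBuilder() copies an existing
      * response so the registration can be replaced without mutating the
      * original; `resp` and `newRegistration` are placeholders:
      *
      *   RegisterResponseProto updated = resp.toBuilder()
      *       .setRegistration(newRegistration)
      *       .build();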
* * Protobuf type {@code hadoop.hdfs.namenode.RegisterResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.RegisterResponseProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RegisterResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RegisterResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getRegistrationFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (registrationBuilder_ == null) { registration_ = null; } else { registrationBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_RegisterResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { if (registrationBuilder_ == null) { result.registration_ = registration_; } else { result.registration_ = registrationBuilder_.build(); } to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return 
super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance()) return this; if (other.hasRegistration()) { mergeRegistration(other.getRegistration()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasRegistration()) { return false; } if (!getRegistration().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto registration_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public boolean hasRegistration() { return ((bitField0_ & 0x00000001) != 0); } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration() { if (registrationBuilder_ == null) { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } else { return registrationBuilder_.getMessage(); } } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto value) { if (registrationBuilder_ == null) { if (value == null) { throw new NullPointerException(); } registration_ = value; onChanged(); } else { registrationBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder setRegistration( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder builderForValue) { if (registrationBuilder_ == null) { registration_ = builderForValue.build(); onChanged(); } else { registrationBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto value) { if (registrationBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && registration_ != null && registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance()) { registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); } else { registration_ = value; } onChanged(); } else { registrationBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder clearRegistration() { if (registrationBuilder_ == null) { registration_ = null; onChanged(); } else { registrationBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { bitField0_ |= 0x00000001; onChanged(); return getRegistrationFieldBuilder().getBuilder(); } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { if (registrationBuilder_ != null) { return registrationBuilder_.getMessageOrBuilder(); } else { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder> getRegistrationFieldBuilder() { if (registrationBuilder_ == null) { registrationBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder>( getRegistration(), getParentForChildren(), isClean()); registration_ = null; } return registrationBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.RegisterResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.RegisterResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public RegisterResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new RegisterResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface StartCheckpointRequestProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.StartCheckpointRequestProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ boolean hasRegistration(); /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration(); /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); } /** *
   **
   * Start checkpoint request
   * registration - Namenode that is starting the checkpoint
   * 
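    *
    * A minimal, illustrative round trip (names are hypothetical; the
    * NamenodeRegistrationProto value would be built by the caller):
    *
    *   StartCheckpointRequestProto request = StartCheckpointRequestProto
    *       .newBuilder()
    *       .setRegistration(backupNodeRegistration) // required field
    *       .build();
    *   byte[] wire = request.toByteArray();         // serialize for the RPC layer
    *   StartCheckpointRequestProto parsed =
    *       StartCheckpointRequestProto.parseFrom(wire);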
* * Protobuf type {@code hadoop.hdfs.namenode.StartCheckpointRequestProto} */ public static final class StartCheckpointRequestProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.StartCheckpointRequestProto) StartCheckpointRequestProtoOrBuilder { private static final long serialVersionUID = 0L; // Use StartCheckpointRequestProto.newBuilder() to construct. private StartCheckpointRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private StartCheckpointRequestProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StartCheckpointRequestProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) != 0)) { subBuilder = registration_.toBuilder(); } registration_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(registration_); registration_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_StartCheckpointRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_StartCheckpointRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.Builder.class); } private int bitField0_; public static final int REGISTRATION_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto registration_; /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public boolean hasRegistration() { return ((bitField0_ & 0x00000001) != 0); } /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration() { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } /** *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasRegistration()) { memoizedIsInitialized = 0; return false; } if (!getRegistration().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getRegistration()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getRegistration()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto) obj; if (hasRegistration() != other.hasRegistration()) return false; if (hasRegistration()) { if (!getRegistration() .equals(other.getRegistration())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasRegistration()) { hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; hash = (53 * hash) + getRegistration().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * Start checkpoint request
     * registration - Namenode that is starting the checkpoint
     * 
* * Protobuf type {@code hadoop.hdfs.namenode.StartCheckpointRequestProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.StartCheckpointRequestProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_StartCheckpointRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_StartCheckpointRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getRegistrationFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (registrationBuilder_ == null) { registration_ = null; } else { registrationBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_StartCheckpointRequestProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { if (registrationBuilder_ == null) { result.registration_ = registration_; } else { result.registration_ = registrationBuilder_.build(); } to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDefaultInstance()) return this; if (other.hasRegistration()) { mergeRegistration(other.getRegistration()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasRegistration()) { return false; } if (!getRegistration().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto registration_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public boolean hasRegistration() { return ((bitField0_ & 0x00000001) != 0); } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration() { if (registrationBuilder_ == null) { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } else { return registrationBuilder_.getMessage(); } } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto value) { if (registrationBuilder_ == null) { if (value == null) { throw new NullPointerException(); } registration_ = value; onChanged(); } else { registrationBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder setRegistration( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder builderForValue) { if (registrationBuilder_ == null) { registration_ = builderForValue.build(); onChanged(); } else { registrationBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto value) { if (registrationBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && registration_ != null && registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance()) { registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); } else { registration_ = value; } onChanged(); } else { registrationBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public Builder clearRegistration() { if (registrationBuilder_ == null) { registration_ = null; onChanged(); } else { registrationBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { bitField0_ |= 0x00000001; onChanged(); return getRegistrationFieldBuilder().getBuilder(); } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { if (registrationBuilder_ != null) { return registrationBuilder_.getMessageOrBuilder(); } else { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } } /** *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder> getRegistrationFieldBuilder() { if (registrationBuilder_ == null) { registrationBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder>( getRegistration(), getParentForChildren(), isClean()); registration_ = null; } return registrationBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.StartCheckpointRequestProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.StartCheckpointRequestProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public StartCheckpointRequestProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new StartCheckpointRequestProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface StartCheckpointResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.StartCheckpointResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ boolean hasCommand(); /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto getCommand(); /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProtoOrBuilder getCommandOrBuilder(); } /** *
   **
    * command - Command returned by the active namenode to be
    *           handled by the caller.
   * 
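    *
    * An illustrative sketch of consuming this response (hypothetical names;
    * NamenodeCommandProto is defined in HdfsServerProtos):
    *
    *   StartCheckpointResponseProto response =
    *       StartCheckpointResponseProto.parseFrom(wire); // wire obtained elsewhere
    *   if (response.hasCommand()) {
    *     org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos
    *         .NamenodeCommandProto command = response.getCommand();
    *     // dispatch on the command here
    *   }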
* * Protobuf type {@code hadoop.hdfs.namenode.StartCheckpointResponseProto} */ public static final class StartCheckpointResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.StartCheckpointResponseProto) StartCheckpointResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use StartCheckpointResponseProto.newBuilder() to construct. private StartCheckpointResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private StartCheckpointResponseProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StartCheckpointResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) != 0)) { subBuilder = command_.toBuilder(); } command_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(command_); command_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_StartCheckpointResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_StartCheckpointResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.Builder.class); } private int bitField0_; public static final int COMMAND_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto command_; /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ public boolean hasCommand() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto getCommand() { return command_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.getDefaultInstance() : command_; } /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProtoOrBuilder getCommandOrBuilder() { return command_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.getDefaultInstance() : command_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasCommand()) { memoizedIsInitialized = 0; return false; } if (!getCommand().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getCommand()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getCommand()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto) obj; if (hasCommand() != other.hasCommand()) return false; if (hasCommand()) { if (!getCommand() .equals(other.getCommand())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasCommand()) { hash = (37 * hash) + COMMAND_FIELD_NUMBER; hash = (53 * hash) + getCommand().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
      * command - Command returned by the active namenode to be
      *           handled by the caller.
     * 
* * Protobuf type {@code hadoop.hdfs.namenode.StartCheckpointResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.StartCheckpointResponseProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_StartCheckpointResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_StartCheckpointResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getCommandFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (commandBuilder_ == null) { command_ = null; } else { commandBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_StartCheckpointResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { if (commandBuilder_ == null) { result.command_ = command_; } else { result.command_ = commandBuilder_.build(); } to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance()) return this; if (other.hasCommand()) { mergeCommand(other.getCommand()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasCommand()) { return false; } if (!getCommand().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto command_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProtoOrBuilder> commandBuilder_; /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ public boolean hasCommand() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto getCommand() { if (commandBuilder_ == null) { return command_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.getDefaultInstance() : command_; } else { return commandBuilder_.getMessage(); } } /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ public Builder setCommand(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto value) { if (commandBuilder_ == null) { if (value == null) { throw new NullPointerException(); } command_ = value; onChanged(); } else { commandBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ public Builder setCommand( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Builder builderForValue) { if (commandBuilder_ == null) { command_ = builderForValue.build(); onChanged(); } else { commandBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ public Builder mergeCommand(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto value) { if (commandBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && command_ != null && command_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.getDefaultInstance()) { command_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.newBuilder(command_).mergeFrom(value).buildPartial(); } else { command_ = value; } onChanged(); } else { commandBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ public Builder clearCommand() { if (commandBuilder_ == null) { command_ = null; onChanged(); } else { commandBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Builder getCommandBuilder() { bitField0_ |= 0x00000001; onChanged(); return getCommandFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProtoOrBuilder getCommandOrBuilder() { if (commandBuilder_ != null) { return commandBuilder_.getMessageOrBuilder(); } else { return command_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.getDefaultInstance() : command_; } } /** * required .hadoop.hdfs.NamenodeCommandProto command = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProtoOrBuilder> getCommandFieldBuilder() { if (commandBuilder_ == null) { commandBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProtoOrBuilder>( getCommand(), getParentForChildren(), isClean()); command_ = null; } return commandBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.StartCheckpointResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.StartCheckpointResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public StartCheckpointResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new StartCheckpointResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface EndCheckpointRequestProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.EndCheckpointRequestProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** *
     * Registration info
     * 
     *
     * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1;
     */
    boolean hasRegistration();
    /**
     *
     * Registration info
     * 
     *
     * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration();
    /**
     *
     * Registration info
     * 
     *
     * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder();

    /**
     * required .hadoop.hdfs.CheckpointSignatureProto signature = 2;
     */
    boolean hasSignature();
    /**
     * required .hadoop.hdfs.CheckpointSignatureProto signature = 2;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto getSignature();
    /**
     * required .hadoop.hdfs.CheckpointSignatureProto signature = 2;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder();
  }

  /**
   *
   **
   * End or finalize the previously started checkpoint
   * registration - Namenode that is ending the checkpoint
   * signature - unique token identifying the checkpoint transaction,
   *             received when the checkpoint was started.
   * 
* * Protobuf type {@code hadoop.hdfs.namenode.EndCheckpointRequestProto} */ public static final class EndCheckpointRequestProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.EndCheckpointRequestProto) EndCheckpointRequestProtoOrBuilder { private static final long serialVersionUID = 0L; // Use EndCheckpointRequestProto.newBuilder() to construct. private EndCheckpointRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private EndCheckpointRequestProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private EndCheckpointRequestProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) != 0)) { subBuilder = registration_.toBuilder(); } registration_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(registration_); registration_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) != 0)) { subBuilder = signature_.toBuilder(); } signature_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(signature_); signature_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_EndCheckpointRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_EndCheckpointRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.class, 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.Builder.class); } private int bitField0_; public static final int REGISTRATION_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto registration_; /** *
     * Registration info
     * 
     *
     * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1;
     */
    public boolean hasRegistration() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     *
     * Registration info
     * 
     *
     * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration() {
      return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_;
    }
    /**
     *
     * Registration info
     * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_; } public static final int SIGNATURE_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto signature_; /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 2; */ public boolean hasSignature() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto getSignature() { return signature_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance() : signature_; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { return signature_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance() : signature_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasRegistration()) { memoizedIsInitialized = 0; return false; } if (!hasSignature()) { memoizedIsInitialized = 0; return false; } if (!getRegistration().isInitialized()) { memoizedIsInitialized = 0; return false; } if (!getSignature().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getRegistration()); } if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(2, getSignature()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getRegistration()); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(2, getSignature()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto) obj; if (hasRegistration() != other.hasRegistration()) return false; if (hasRegistration()) { if (!getRegistration() .equals(other.getRegistration())) return false; } if (hasSignature() != other.hasSignature()) return false; if (hasSignature()) { if (!getSignature() .equals(other.getSignature())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int 
hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasRegistration()) { hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; hash = (53 * hash) + getRegistration().hashCode(); } if (hasSignature()) { hash = (37 * hash) + SIGNATURE_FIELD_NUMBER; hash = (53 * hash) + getSignature().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
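      // Illustrative sketch (editorial, not part of the generated file): the
      // parseFrom overloads above each decode one complete serialized message,
      // while the delimited variants first read a varint length prefix, the
      // framing written by Message.writeDelimitedTo. Assuming `req` is a built
      // EndCheckpointRequestProto and `out`/`in` are a paired stream:
      //
      //   req.writeDelimitedTo(out);
      //   EndCheckpointRequestProto copy =
      //       EndCheckpointRequestProto.parseDelimitedFrom(in);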
.parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * End or finalize the previously started checkpoint
     * registration - Namenode that is ending the checkpoint
      * signature - unique token identifying the checkpoint transaction,
      *             received when the checkpoint was started.
     * 
* * Protobuf type {@code hadoop.hdfs.namenode.EndCheckpointRequestProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.EndCheckpointRequestProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_EndCheckpointRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_EndCheckpointRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getRegistrationFieldBuilder(); getSignatureFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (registrationBuilder_ == null) { registration_ = null; } else { registrationBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); if (signatureBuilder_ == null) { signature_ = null; } else { signatureBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_EndCheckpointRequestProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { if (registrationBuilder_ == null) { result.registration_ = registration_; } else { result.registration_ = registrationBuilder_.build(); } to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { if (signatureBuilder_ == null) { 
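          // Illustrative sketch (editorial, not part of the generated file):
          // typical client-side construction of this message, where `reg` and
          // `sig` stand in for values carried over from the preceding
          // StartCheckpoint exchange:
          //
          //   EndCheckpointRequestProto req = EndCheckpointRequestProto.newBuilder()
          //       .setRegistration(reg)   // required field 1
          //       .setSignature(sig)      // required field 2
          //       .build();               // build() rejects missing required fields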
result.signature_ = signature_; } else { result.signature_ = signatureBuilder_.build(); } to_bitField0_ |= 0x00000002; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDefaultInstance()) return this; if (other.hasRegistration()) { mergeRegistration(other.getRegistration()); } if (other.hasSignature()) { mergeSignature(other.getSignature()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasRegistration()) { return false; } if (!hasSignature()) { return false; } if (!getRegistration().isInitialized()) { return false; } if (!getSignature().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto registration_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; /** *
       * Registration info
       * 
       *
       * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1;
       */
      public boolean hasRegistration() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       *
       * Registration info
       * 
       *
       * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getRegistration() {
        if (registrationBuilder_ == null) {
          return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_;
        } else {
          return registrationBuilder_.getMessage();
        }
      }
      /**
       *
       * Registration info
       * 
       *
       * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1;
       */
      public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          registration_ = value;
          onChanged();
        } else {
          registrationBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       *
       * Registration info
       * 
       *
       * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1;
       */
      public Builder setRegistration(
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder builderForValue) {
        if (registrationBuilder_ == null) {
          registration_ = builderForValue.build();
          onChanged();
        } else {
          registrationBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       *
       * Registration info
       * 
       *
       * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1;
       */
      public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
              registration_ != null &&
              registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance()) {
            registration_ =
                org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
          } else {
            registration_ = value;
          }
          onChanged();
        } else {
          registrationBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       *
       * Registration info
       * 
       *
       * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1;
       */
      public Builder clearRegistration() {
        if (registrationBuilder_ == null) {
          registration_ = null;
          onChanged();
        } else {
          registrationBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       *
       * Registration info
       * 
       *
       * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getRegistrationFieldBuilder().getBuilder();
      }
      /**
       *
       * Registration info
       * 
       *
       * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
        if (registrationBuilder_ != null) {
          return registrationBuilder_.getMessageOrBuilder();
        } else {
          return registration_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance() : registration_;
        }
      }
      /**
       *
       * Registration info
       * 
* * required .hadoop.hdfs.NamenodeRegistrationProto registration = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder> getRegistrationFieldBuilder() { if (registrationBuilder_ == null) { registrationBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder>( getRegistration(), getParentForChildren(), isClean()); registration_ = null; } return registrationBuilder_; } private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto signature_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder> signatureBuilder_; /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 2; */ public boolean hasSignature() { return ((bitField0_ & 0x00000002) != 0); } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto getSignature() { if (signatureBuilder_ == null) { return signature_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance() : signature_; } else { return signatureBuilder_.getMessage(); } } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 2; */ public Builder setSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto value) { if (signatureBuilder_ == null) { if (value == null) { throw new NullPointerException(); } signature_ = value; onChanged(); } else { signatureBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 2; */ public Builder setSignature( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder builderForValue) { if (signatureBuilder_ == null) { signature_ = builderForValue.build(); onChanged(); } else { signatureBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 2; */ public Builder mergeSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto value) { if (signatureBuilder_ == null) { if (((bitField0_ & 0x00000002) != 0) && signature_ != null && signature_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance()) { signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.newBuilder(signature_).mergeFrom(value).buildPartial(); } else { signature_ = value; } onChanged(); } else { signatureBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 2; */ public Builder clearSignature() { if (signatureBuilder_ == null) { 
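          // Illustrative note (editorial, not part of the generated file):
          // clearing a required field such as signature leaves the builder
          // uninitialized, so a subsequent build() would throw
          // UninitializedMessageException; buildPartial() skips that check and
          // returns the partially populated message.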
signature_ = null; onChanged(); } else { signatureBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder getSignatureBuilder() { bitField0_ |= 0x00000002; onChanged(); return getSignatureFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { if (signatureBuilder_ != null) { return signatureBuilder_.getMessageOrBuilder(); } else { return signature_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance() : signature_; } } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 2; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder> getSignatureFieldBuilder() { if (signatureBuilder_ == null) { signatureBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder>( getSignature(), getParentForChildren(), isClean()); signature_ = null; } return signatureBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.EndCheckpointRequestProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.EndCheckpointRequestProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public EndCheckpointRequestProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new EndCheckpointRequestProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto getDefaultInstanceForType() { 
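      // Illustrative sketch (editorial, not part of the generated file):
      // decoding a request received off the wire. parseFrom rejects malformed
      // input and missing required fields with InvalidProtocolBufferException.
      // Assuming `bytes` holds a serialized EndCheckpointRequestProto:
      //
      //   EndCheckpointRequestProto req = EndCheckpointRequestProto.parseFrom(bytes);
      //   CheckpointSignatureProto sig = req.getSignature();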
      return DEFAULT_INSTANCE;
    }
  }

  public interface EndCheckpointResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.EndCheckpointResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }

  /**
   *
   **
   * void response
   * 
* * Protobuf type {@code hadoop.hdfs.namenode.EndCheckpointResponseProto} */ public static final class EndCheckpointResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.EndCheckpointResponseProto) EndCheckpointResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use EndCheckpointResponseProto.newBuilder() to construct. private EndCheckpointResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private EndCheckpointResponseProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private EndCheckpointResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_EndCheckpointResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_EndCheckpointResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.Builder.class); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto 
other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto) obj; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, 
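          // Illustrative note (editorial, not part of the generated file):
          // EndCheckpointResponseProto declares no fields, so any parsed
          // instance compares equal to the shared default, e.g.
          //
          //   EndCheckpointResponseProto resp = EndCheckpointResponseProto.parseFrom(data);
          //   boolean same = resp.equals(EndCheckpointResponseProto.getDefaultInstance());
          //
          // (true unless the peer sent unknown fields, which are preserved and
          // participate in equals/hashCode).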
input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * void response
     * 
* * Protobuf type {@code hadoop.hdfs.namenode.EndCheckpointResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.EndCheckpointResponseProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_EndCheckpointResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_EndCheckpointResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_EndCheckpointResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.EndCheckpointResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.EndCheckpointResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public EndCheckpointResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new EndCheckpointResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } 
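    // Illustrative sketch (editorial, not part of the generated file):
    // framing code can stay generic by fetching the parser instead of naming a
    // concrete parse method, e.g. with a CodedInputStream `in` positioned at a
    // message boundary:
    //
    //   EndCheckpointResponseProto resp =
    //       EndCheckpointResponseProto.parser().parseFrom(in);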
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }
  }

  public interface GetEditLogManifestRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetEditLogManifestRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     *
     * Transaction ID
     * 
     *
     * required uint64 sinceTxId = 1;
     */
    boolean hasSinceTxId();
    /**
     *
     * Transaction ID
     * 
     *
     * required uint64 sinceTxId = 1;
     */
    long getSinceTxId();
  }

  /**
   *
   **
   * sinceTxId - return the edit log information for transactions >= sinceTxId
   * 
* * Protobuf type {@code hadoop.hdfs.namenode.GetEditLogManifestRequestProto} */ public static final class GetEditLogManifestRequestProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetEditLogManifestRequestProto) GetEditLogManifestRequestProtoOrBuilder { private static final long serialVersionUID = 0L; // Use GetEditLogManifestRequestProto.newBuilder() to construct. private GetEditLogManifestRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private GetEditLogManifestRequestProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetEditLogManifestRequestProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; sinceTxId_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetEditLogManifestRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetEditLogManifestRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.Builder.class); } private int bitField0_; public static final int SINCETXID_FIELD_NUMBER = 1; private long sinceTxId_; /** *
     * Transaction ID
     * 
     *
     * required uint64 sinceTxId = 1;
     */
    public boolean hasSinceTxId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     *
     * Transaction ID
     * 
* * required uint64 sinceTxId = 1; */ public long getSinceTxId() { return sinceTxId_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSinceTxId()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, sinceTxId_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, sinceTxId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto) obj; if (hasSinceTxId() != other.hasSinceTxId()) return false; if (hasSinceTxId()) { if (getSinceTxId() != other.getSinceTxId()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSinceTxId()) { hash = (37 * hash) + SINCETXID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSinceTxId()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { 
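      // Illustrative sketch (editorial, not part of the generated file): a
      // byte-array round trip; setSinceTxId is the setter protoc generates for
      // field 1:
      //
      //   byte[] bytes = GetEditLogManifestRequestProto.newBuilder()
      //       .setSinceTxId(12345L)
      //       .build()
      //       .toByteArray();
      //   long txId = GetEditLogManifestRequestProto.parseFrom(bytes).getSinceTxId();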
return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * sinceTxId - return the editlog information for transactions >= sinceTxId
     * 
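     *
     * A minimal usage sketch (illustrative, not part of the generated code);
     * the transaction id 1000L is an arbitrary example value:
     *
     *   GetEditLogManifestRequestProto req =
     *       GetEditLogManifestRequestProto.newBuilder()
     *           .setSinceTxId(1000L)
     *           .build();
     *   byte[] bytes = req.toByteArray();
     *   GetEditLogManifestRequestProto parsed =
     *       GetEditLogManifestRequestProto.parseFrom(bytes);
     *   assert parsed.getSinceTxId() == 1000L;
     *
     * Note that build() throws if the required sinceTxId field is unset;
     * buildPartial() together with isInitialized() avoids that.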
* * Protobuf type {@code hadoop.hdfs.namenode.GetEditLogManifestRequestProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetEditLogManifestRequestProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetEditLogManifestRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetEditLogManifestRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); sinceTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetEditLogManifestRequestProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.sinceTxId_ = sinceTxId_; to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance()) return this; if (other.hasSinceTxId()) { setSinceTxId(other.getSinceTxId()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasSinceTxId()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long sinceTxId_ ; /** *
       * Transaction ID from which edit log information is requested (inclusive)
       * 
* * required uint64 sinceTxId = 1; */ public boolean hasSinceTxId() { return ((bitField0_ & 0x00000001) != 0); } /** *
       * Transaction ID from which edit log information is requested (inclusive)
       * 
* * required uint64 sinceTxId = 1; */ public long getSinceTxId() { return sinceTxId_; } /** *
       * Transaction ID from which edit log information is requested (inclusive)
       * 
* * required uint64 sinceTxId = 1; */ public Builder setSinceTxId(long value) { bitField0_ |= 0x00000001; sinceTxId_ = value; onChanged(); return this; } /** *
       * Transaction ID from which edit log information is requested (inclusive)
       * 
* * required uint64 sinceTxId = 1; */ public Builder clearSinceTxId() { bitField0_ = (bitField0_ & ~0x00000001); sinceTxId_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetEditLogManifestRequestProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetEditLogManifestRequestProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public GetEditLogManifestRequestProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new GetEditLogManifestRequestProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface GetEditLogManifestResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetEditLogManifestResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ boolean hasManifest(); /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto getManifest(); /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder(); } /** *
   **
   * manifest - Enumeration of editlog segments from the namenode that
   *            contain transactions >= sinceTxId in the request
   * 
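   *
   * Illustrative reader-side sketch (not part of the generated code; bytes is
   * assumed to hold a serialized response). manifest is a required field, so
   * hasManifest() is true on any fully initialized parsed message, but
   * checking it first is the defensive pattern:
   *
   *   GetEditLogManifestResponseProto resp =
   *       GetEditLogManifestResponseProto.parseFrom(bytes);
   *   if (resp.hasManifest()) {
   *     HdfsServerProtos.RemoteEditLogManifestProto manifest = resp.getManifest();
   *     // inspect the remote editlog segments carried by the manifest
   *   }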
* * Protobuf type {@code hadoop.hdfs.namenode.GetEditLogManifestResponseProto} */ public static final class GetEditLogManifestResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetEditLogManifestResponseProto) GetEditLogManifestResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use GetEditLogManifestResponseProto.newBuilder() to construct. private GetEditLogManifestResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private GetEditLogManifestResponseProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetEditLogManifestResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) != 0)) { subBuilder = manifest_.toBuilder(); } manifest_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(manifest_); manifest_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetEditLogManifestResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetEditLogManifestResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.Builder.class); } private int bitField0_; public static final int MANIFEST_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto manifest_; /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public boolean hasManifest() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto getManifest() { return manifest_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.getDefaultInstance() : manifest_; } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() { return manifest_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.getDefaultInstance() : manifest_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasManifest()) { memoizedIsInitialized = 0; return false; } if (!getManifest().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(1, getManifest()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, getManifest()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto) obj; if (hasManifest() != other.hasManifest()) return false; if (hasManifest()) { if (!getManifest() .equals(other.getManifest())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasManifest()) { hash = (37 * hash) + MANIFEST_FIELD_NUMBER; hash = (53 * hash) + getManifest().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * manifest - Enumeration of editlog segments from the namenode that
     *            contain transactions >= sinceTxId in the request
     * 
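     *
     * Illustrative builder-side sketch (not part of the generated code;
     * someManifest stands in for a fully built RemoteEditLogManifestProto):
     *
     *   GetEditLogManifestResponseProto resp =
     *       GetEditLogManifestResponseProto.newBuilder()
     *           .setManifest(someManifest)
     *           .build();
     *
     * Alternatively, getManifestBuilder() returns a nested builder for the
     * manifest field and marks the field as present.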
* * Protobuf type {@code hadoop.hdfs.namenode.GetEditLogManifestResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetEditLogManifestResponseProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetEditLogManifestResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetEditLogManifestResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getManifestFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); if (manifestBuilder_ == null) { manifest_ = null; } else { manifestBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetEditLogManifestResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { if (manifestBuilder_ == null) { result.manifest_ = manifest_; } else { result.manifest_ = manifestBuilder_.build(); } to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()) return this; if (other.hasManifest()) { mergeManifest(other.getManifest()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasManifest()) { return false; } if (!getManifest().isInitialized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto manifest_; private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProtoOrBuilder> manifestBuilder_; /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public boolean hasManifest() { return ((bitField0_ & 0x00000001) != 0); } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto getManifest() { if (manifestBuilder_ == null) { return manifest_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.getDefaultInstance() : manifest_; } else { return manifestBuilder_.getMessage(); } } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public Builder setManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto value) { if (manifestBuilder_ == null) { if (value == null) { throw new NullPointerException(); } manifest_ = value; onChanged(); } else { manifestBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public Builder setManifest( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder builderForValue) { if (manifestBuilder_ == null) { manifest_ = builderForValue.build(); onChanged(); } else { manifestBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public Builder mergeManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto value) { if (manifestBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0) && manifest_ != null && manifest_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.getDefaultInstance()) { manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.newBuilder(manifest_).mergeFrom(value).buildPartial(); } else { manifest_ = value; } onChanged(); } else { manifestBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public Builder clearManifest() { if (manifestBuilder_ == null) { manifest_ = null; onChanged(); } else { manifestBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder getManifestBuilder() { bitField0_ |= 0x00000001; onChanged(); return getManifestFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() { if (manifestBuilder_ != null) { return manifestBuilder_.getMessageOrBuilder(); } else { return manifest_ == null ? 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.getDefaultInstance() : manifest_; } } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProtoOrBuilder> getManifestFieldBuilder() { if (manifestBuilder_ == null) { manifestBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProtoOrBuilder>( getManifest(), getParentForChildren(), isClean()); manifest_ = null; } return manifestBuilder_; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetEditLogManifestResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetEditLogManifestResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public GetEditLogManifestResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new GetEditLogManifestResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface IsUpgradeFinalizedRequestProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.IsUpgradeFinalizedRequestProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { } /** *
   **
   * void request
   * 
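   *
   * Because the message carries no fields, an instance can be obtained from
   * the shared default instance or from an empty builder (illustrative
   * sketch, not part of the generated code):
   *
   *   IsUpgradeFinalizedRequestProto req =
   *       IsUpgradeFinalizedRequestProto.getDefaultInstance();
   *   // equivalent: IsUpgradeFinalizedRequestProto.newBuilder().build()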
* * Protobuf type {@code hadoop.hdfs.namenode.IsUpgradeFinalizedRequestProto} */ public static final class IsUpgradeFinalizedRequestProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.IsUpgradeFinalizedRequestProto) IsUpgradeFinalizedRequestProtoOrBuilder { private static final long serialVersionUID = 0L; // Use IsUpgradeFinalizedRequestProto.newBuilder() to construct. private IsUpgradeFinalizedRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private IsUpgradeFinalizedRequestProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private IsUpgradeFinalizedRequestProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto.Builder.class); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto)) { return super.equals(obj); } 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto) obj; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) 
throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * void request
     * 
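     *
     * Illustrative sketch of the length-delimited framing that pairs with
     * parseDelimitedFrom; writeDelimitedTo is the standard protobuf
     * counterpart, and out/in stand in for caller-supplied streams:
     *
     *   IsUpgradeFinalizedRequestProto.getDefaultInstance().writeDelimitedTo(out);
     *   IsUpgradeFinalizedRequestProto req =
     *       IsUpgradeFinalizedRequestProto.parseDelimitedFrom(in);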
* * Protobuf type {@code hadoop.hdfs.namenode.IsUpgradeFinalizedRequestProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.IsUpgradeFinalizedRequestProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedRequestProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } 
@java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.IsUpgradeFinalizedRequestProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.IsUpgradeFinalizedRequestProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public IsUpgradeFinalizedRequestProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new IsUpgradeFinalizedRequestProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } 
@java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface IsUpgradeFinalizedResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.IsUpgradeFinalizedResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required bool isUpgradeFinalized = 1; */ boolean hasIsUpgradeFinalized(); /** * required bool isUpgradeFinalized = 1; */ boolean getIsUpgradeFinalized(); } /** * Protobuf type {@code hadoop.hdfs.namenode.IsUpgradeFinalizedResponseProto} */ public static final class IsUpgradeFinalizedResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.IsUpgradeFinalizedResponseProto) IsUpgradeFinalizedResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use IsUpgradeFinalizedResponseProto.newBuilder() to construct. private IsUpgradeFinalizedResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private IsUpgradeFinalizedResponseProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private IsUpgradeFinalizedResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; isUpgradeFinalized_ = input.readBool(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto.Builder.class); } private int bitField0_; public static final int ISUPGRADEFINALIZED_FIELD_NUMBER = 1; private boolean 
isUpgradeFinalized_; /** * required bool isUpgradeFinalized = 1; */ public boolean hasIsUpgradeFinalized() { return ((bitField0_ & 0x00000001) != 0); } /** * required bool isUpgradeFinalized = 1; */ public boolean getIsUpgradeFinalized() { return isUpgradeFinalized_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasIsUpgradeFinalized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeBool(1, isUpgradeFinalized_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(1, isUpgradeFinalized_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto) obj; if (hasIsUpgradeFinalized() != other.hasIsUpgradeFinalized()) return false; if (hasIsUpgradeFinalized()) { if (getIsUpgradeFinalized() != other.getIsUpgradeFinalized()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasIsUpgradeFinalized()) { hash = (37 * hash) + ISUPGRADEFINALIZED_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getIsUpgradeFinalized()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.namenode.IsUpgradeFinalizedResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.IsUpgradeFinalizedResponseProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); isUpgradeFinalized_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.isUpgradeFinalized_ = isUpgradeFinalized_; to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override 
public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto.getDefaultInstance()) return this; if (other.hasIsUpgradeFinalized()) { setIsUpgradeFinalized(other.getIsUpgradeFinalized()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasIsUpgradeFinalized()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private boolean isUpgradeFinalized_ ; /** * required bool isUpgradeFinalized = 1; */ public boolean hasIsUpgradeFinalized() { return ((bitField0_ & 0x00000001) != 0); } /** * required bool isUpgradeFinalized = 1; */ public boolean getIsUpgradeFinalized() { return isUpgradeFinalized_; } /** * required bool isUpgradeFinalized = 1; */ public Builder setIsUpgradeFinalized(boolean value) { bitField0_ |= 0x00000001; isUpgradeFinalized_ = value; onChanged(); return this; } /** * required bool isUpgradeFinalized = 1; */ public Builder clearIsUpgradeFinalized() { bitField0_ = (bitField0_ & ~0x00000001); isUpgradeFinalized_ = false; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder 
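/* A minimal sketch of the merge semantics defined by mergeFrom above: only
 * fields the source message has set are copied, so an explicitly set false
 * value survives a toBuilder() round trip.
 *
 *   IsUpgradeFinalizedResponseProto base =
 *       IsUpgradeFinalizedResponseProto.newBuilder()
 *           .setIsUpgradeFinalized(false).build();
 *   IsUpgradeFinalizedResponseProto copy = base.toBuilder().build();
 *   assert copy.hasIsUpgradeFinalized() && !copy.getIsUpgradeFinalized();
 */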
mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.IsUpgradeFinalizedResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.IsUpgradeFinalizedResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public IsUpgradeFinalizedResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new IsUpgradeFinalizedResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface IsRollingUpgradeRequestProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.IsRollingUpgradeRequestProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { } /** *
   **
   * void request
   * 
* * Protobuf type {@code hadoop.hdfs.namenode.IsRollingUpgradeRequestProto} */ public static final class IsRollingUpgradeRequestProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.IsRollingUpgradeRequestProto) IsRollingUpgradeRequestProtoOrBuilder { private static final long serialVersionUID = 0L; // Use IsRollingUpgradeRequestProto.newBuilder() to construct. private IsRollingUpgradeRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private IsRollingUpgradeRequestProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private IsRollingUpgradeRequestProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsRollingUpgradeRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsRollingUpgradeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto.Builder.class); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto)) { return super.equals(obj); } 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto) obj; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
     **
     * void request
     * 
* * Protobuf type {@code hadoop.hdfs.namenode.IsRollingUpgradeRequestProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.IsRollingUpgradeRequestProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsRollingUpgradeRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsRollingUpgradeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsRollingUpgradeRequestProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder 
setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.IsRollingUpgradeRequestProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.IsRollingUpgradeRequestProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public IsRollingUpgradeRequestProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new IsRollingUpgradeRequestProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser 
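/* A minimal sketch for this field-less "void request" message: callers can
 * simply reuse the shared default instance rather than building new copies.
 *
 *   IsRollingUpgradeRequestProto req =
 *       IsRollingUpgradeRequestProto.getDefaultInstance();
 *   assert req.getSerializedSize() == 0;   // no fields, nothing on the wire
 */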
getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface IsRollingUpgradeResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.IsRollingUpgradeResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required bool isRollingUpgrade = 1; */ boolean hasIsRollingUpgrade(); /** * required bool isRollingUpgrade = 1; */ boolean getIsRollingUpgrade(); } /** * Protobuf type {@code hadoop.hdfs.namenode.IsRollingUpgradeResponseProto} */ public static final class IsRollingUpgradeResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.IsRollingUpgradeResponseProto) IsRollingUpgradeResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use IsRollingUpgradeResponseProto.newBuilder() to construct. private IsRollingUpgradeResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private IsRollingUpgradeResponseProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private IsRollingUpgradeResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; isRollingUpgrade_ = input.readBool(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsRollingUpgradeResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsRollingUpgradeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto.Builder.class); } private int bitField0_; public static final int ISROLLINGUPGRADE_FIELD_NUMBER = 1; private boolean isRollingUpgrade_; /** * required bool isRollingUpgrade = 1; */ public boolean hasIsRollingUpgrade() { return ((bitField0_ & 
0x00000001) != 0); } /** * required bool isRollingUpgrade = 1; */ public boolean getIsRollingUpgrade() { return isRollingUpgrade_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasIsRollingUpgrade()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeBool(1, isRollingUpgrade_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeBoolSize(1, isRollingUpgrade_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto) obj; if (hasIsRollingUpgrade() != other.hasIsRollingUpgrade()) return false; if (hasIsRollingUpgrade()) { if (getIsRollingUpgrade() != other.getIsRollingUpgrade()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasIsRollingUpgrade()) { hash = (37 * hash) + ISROLLINGUPGRADE_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean( getIsRollingUpgrade()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.namenode.IsRollingUpgradeResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.IsRollingUpgradeResponseProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsRollingUpgradeResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsRollingUpgradeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); isRollingUpgrade_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_IsRollingUpgradeResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.isRollingUpgrade_ = isRollingUpgrade_; to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return 
super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto.getDefaultInstance()) return this; if (other.hasIsRollingUpgrade()) { setIsRollingUpgrade(other.getIsRollingUpgrade()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasIsRollingUpgrade()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private boolean isRollingUpgrade_ ; /** * required bool isRollingUpgrade = 1; */ public boolean hasIsRollingUpgrade() { return ((bitField0_ & 0x00000001) != 0); } /** * required bool isRollingUpgrade = 1; */ public boolean getIsRollingUpgrade() { return isRollingUpgrade_; } /** * required bool isRollingUpgrade = 1; */ public Builder setIsRollingUpgrade(boolean value) { bitField0_ |= 0x00000001; isRollingUpgrade_ = value; onChanged(); return this; } /** * required bool isRollingUpgrade = 1; */ public Builder clearIsRollingUpgrade() { bitField0_ = (bitField0_ & ~0x00000001); isRollingUpgrade_ = false; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet 
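/* A minimal wire round-trip sketch for this response (the checked
 * InvalidProtocolBufferException thrown by parseFrom is elided here):
 *
 *   IsRollingUpgradeResponseProto sent =
 *       IsRollingUpgradeResponseProto.newBuilder()
 *           .setIsRollingUpgrade(true).build();
 *   IsRollingUpgradeResponseProto got =
 *       IsRollingUpgradeResponseProto.parseFrom(sent.toByteArray());
 *   assert got.getIsRollingUpgrade();
 */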
unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.IsRollingUpgradeResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.IsRollingUpgradeResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public IsRollingUpgradeResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new IsRollingUpgradeResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface GetFilePathRequestProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetFilePathRequestProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required uint64 fileId = 1; */ boolean hasFileId(); /** * required uint64 fileId = 1; */ long getFileId(); } /** * Protobuf type {@code hadoop.hdfs.namenode.GetFilePathRequestProto} */ public static final class GetFilePathRequestProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetFilePathRequestProto) GetFilePathRequestProtoOrBuilder { private static final long serialVersionUID = 0L; // Use GetFilePathRequestProto.newBuilder() to construct. 
private GetFilePathRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private GetFilePathRequestProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetFilePathRequestProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; fileId_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetFilePathRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetFilePathRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto.Builder.class); } private int bitField0_; public static final int FILEID_FIELD_NUMBER = 1; private long fileId_; /** * required uint64 fileId = 1; */ public boolean hasFileId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 fileId = 1; */ public long getFileId() { return fileId_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasFileId()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, fileId_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, fileId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto) obj; if (hasFileId() != other.hasFileId()) return false; if (hasFileId()) { if (getFileId() != other.getFileId()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasFileId()) { hash = (37 * hash) + FILEID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFileId()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.namenode.GetFilePathRequestProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetFilePathRequestProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetFilePathRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetFilePathRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); fileId_ = 0L; 
bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetFilePathRequestProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.fileId_ = fileId_; to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto.getDefaultInstance()) return this; if (other.hasFileId()) { setFileId(other.getFileId()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasFileId()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { 
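/* A minimal sketch for this request: the required uint64 fileId surfaces as
 * a Java long (values past Long.MAX_VALUE read back negative, as usual for
 * proto uint64). The inode id below is purely illustrative.
 *
 *   GetFilePathRequestProto req = GetFilePathRequestProto.newBuilder()
 *       .setFileId(16386L)
 *       .build();
 *   assert req.hasFileId() && req.getFileId() == 16386L;
 */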
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long fileId_ ; /** * required uint64 fileId = 1; */ public boolean hasFileId() { return ((bitField0_ & 0x00000001) != 0); } /** * required uint64 fileId = 1; */ public long getFileId() { return fileId_; } /** * required uint64 fileId = 1; */ public Builder setFileId(long value) { bitField0_ |= 0x00000001; fileId_ = value; onChanged(); return this; } /** * required uint64 fileId = 1; */ public Builder clearFileId() { bitField0_ = (bitField0_ & ~0x00000001); fileId_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetFilePathRequestProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetFilePathRequestProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public GetFilePathRequestProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new GetFilePathRequestProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface GetFilePathResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetFilePathResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * required string srcPath = 1; */ boolean hasSrcPath(); /** * required string srcPath = 1; */ java.lang.String getSrcPath(); /** * required string srcPath = 1; */ org.apache.hadoop.thirdparty.protobuf.ByteString getSrcPathBytes(); } /** * Protobuf type {@code hadoop.hdfs.namenode.GetFilePathResponseProto} */ public static final class GetFilePathResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // 
@@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetFilePathResponseProto) GetFilePathResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use GetFilePathResponseProto.newBuilder() to construct. private GetFilePathResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private GetFilePathResponseProto() { srcPath_ = ""; } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetFilePathResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; srcPath_ = bs; break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetFilePathResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetFilePathResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto.Builder.class); } private int bitField0_; public static final int SRCPATH_FIELD_NUMBER = 1; private volatile java.lang.Object srcPath_; /** * required string srcPath = 1; */ public boolean hasSrcPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required string srcPath = 1; */ public java.lang.String getSrcPath() { java.lang.Object ref = srcPath_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { srcPath_ = s; } return s; } } /** * required string srcPath = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSrcPathBytes() { java.lang.Object ref = srcPath_; if (ref instanceof java.lang.String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); srcPath_ = b; return b; } else { return 
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasSrcPath()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, srcPath_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, srcPath_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto) obj; if (hasSrcPath() != other.hasSrcPath()) return false; if (hasSrcPath()) { if (!getSrcPath() .equals(other.getSrcPath())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSrcPath()) { hash = (37 * hash) + SRCPATH_FIELD_NUMBER; hash = (53 * hash) + getSrcPath().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.namenode.GetFilePathResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetFilePathResponseProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetFilePathResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetFilePathResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); srcPath_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetFilePathResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { to_bitField0_ |= 0x00000001; } result.srcPath_ = srcPath_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( 
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto.getDefaultInstance()) return this; if (other.hasSrcPath()) { bitField0_ |= 0x00000001; srcPath_ = other.srcPath_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasSrcPath()) { return false; } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object srcPath_ = ""; /** * required string srcPath = 1; */ public boolean hasSrcPath() { return ((bitField0_ & 0x00000001) != 0); } /** * required string srcPath = 1; */ public java.lang.String getSrcPath() { java.lang.Object ref = srcPath_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { srcPath_ = s; } return s; } else { return (java.lang.String) ref; } } /** * required string srcPath = 1; */ public org.apache.hadoop.thirdparty.protobuf.ByteString getSrcPathBytes() { java.lang.Object ref = srcPath_; if (ref instanceof String) { org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); srcPath_ = b; return b; } else { return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref; } } /** * required string srcPath = 1; */ public Builder setSrcPath( 
java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; srcPath_ = value; onChanged(); return this; } /** * required string srcPath = 1; */ public Builder clearSrcPath() { bitField0_ = (bitField0_ & ~0x00000001); srcPath_ = getDefaultInstance().getSrcPath(); onChanged(); return this; } /** * required string srcPath = 1; */ public Builder setSrcPathBytes( org.apache.hadoop.thirdparty.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; srcPath_ = value; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetFilePathResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetFilePathResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public GetFilePathResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new GetFilePathResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface GetNextSPSPathRequestProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetNextSPSPathRequestProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.namenode.GetNextSPSPathRequestProto} */ public static final class GetNextSPSPathRequestProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetNextSPSPathRequestProto) GetNextSPSPathRequestProtoOrBuilder { private static final long serialVersionUID = 0L; // Use GetNextSPSPathRequestProto.newBuilder() to construct. 
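  // Editor's note: the following helper is an illustrative sketch, not part of
  // the protoc-generated file. It shows how a caller might round-trip the
  // GetFilePathRequestProto message defined above; the fileId value below is
  // an arbitrary example inode id.
  private static void exampleGetFilePathRequestRoundTrip()
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    // Build the request; fileId is a required field, so build() would throw
    // if it were never set.
    org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto req =
        org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto
            .newBuilder()
            .setFileId(16385L)
            .build();
    // Serialize to the wire format and parse it back; the required field
    // survives the round trip.
    byte[] wire = req.toByteArray();
    org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto parsed =
        org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto
            .parseFrom(wire);
    assert parsed.hasFileId() && parsed.getFileId() == 16385L;
  }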
private GetNextSPSPathRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private GetNextSPSPathRequestProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetNextSPSPathRequestProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetNextSPSPathRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetNextSPSPathRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto.Builder.class); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto) obj; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto parseFrom( 
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.namenode.GetNextSPSPathRequestProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetNextSPSPathRequestProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetNextSPSPathRequestProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetNextSPSPathRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetNextSPSPathRequestProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override 
public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetNextSPSPathRequestProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetNextSPSPathRequestProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public GetNextSPSPathRequestProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new GetNextSPSPathRequestProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface GetNextSPSPathResponseProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:hadoop.hdfs.namenode.GetNextSPSPathResponseProto) org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder { /** * optional uint64 spsPath = 1; */ boolean hasSpsPath(); /** * optional uint64 spsPath = 1; */ long getSpsPath(); } /** * Protobuf type {@code hadoop.hdfs.namenode.GetNextSPSPathResponseProto} */ public static final class GetNextSPSPathResponseProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.namenode.GetNextSPSPathResponseProto) GetNextSPSPathResponseProtoOrBuilder { private static final long serialVersionUID = 0L; // Use GetNextSPSPathResponseProto.newBuilder() to construct. 
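  // Editor's note: the following helper is an illustrative sketch, not part of
  // the protoc-generated file. It demonstrates the presence semantics of the
  // optional spsPath field declared on this message.
  private static void exampleOptionalSpsPathPresence() {
    org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto
        empty = org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos
            .GetNextSPSPathResponseProto.getDefaultInstance();
    // An unset optional scalar reports absent and the getter returns the
    // type default (0 for uint64).
    assert !empty.hasSpsPath() && empty.getSpsPath() == 0L;

    org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto
        withPath = org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos
            .GetNextSPSPathResponseProto.newBuilder()
            .setSpsPath(42L) // arbitrary example inode id
            .build();
    assert withPath.hasSpsPath() && withPath.getSpsPath() == 42L;
  }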
private GetNextSPSPathResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private GetNextSPSPathResponseProto() { } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetNextSPSPathResponseProto( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { bitField0_ |= 0x00000001; spsPath_ = input.readUInt64(); break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetNextSPSPathResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetNextSPSPathResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto.Builder.class); } private int bitField0_; public static final int SPSPATH_FIELD_NUMBER = 1; private long spsPath_; /** * optional uint64 spsPath = 1; */ public boolean hasSpsPath() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 spsPath = 1; */ public long getSpsPath() { return spsPath_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeUInt64(1, spsPath_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeUInt64Size(1, spsPath_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto) obj; if (hasSpsPath() != other.hasSpsPath()) return false; if (hasSpsPath()) { if (getSpsPath() != other.getSpsPath()) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasSpsPath()) { hash = (37 * hash) + SPSPATH_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSpsPath()); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto parseDelimitedFrom(java.io.InputStream input) 
throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.namenode.GetNextSPSPathResponseProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.namenode.GetNextSPSPathResponseProto) org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetNextSPSPathResponseProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetNextSPSPathResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 
.alwaysUseFieldBuilders) { } } @java.lang.Override public Builder clear() { super.clear(); spsPath_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_hadoop_hdfs_namenode_GetNextSPSPathResponseProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) != 0)) { result.spsPath_ = spsPath_; to_bitField0_ |= 0x00000001; } result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto.getDefaultInstance()) return this; if (other.hasSpsPath()) { setSpsPath(other.getSpsPath()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, 
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private long spsPath_ ; /** * optional uint64 spsPath = 1; */ public boolean hasSpsPath() { return ((bitField0_ & 0x00000001) != 0); } /** * optional uint64 spsPath = 1; */ public long getSpsPath() { return spsPath_; } /** * optional uint64 spsPath = 1; */ public Builder setSpsPath(long value) { bitField0_ |= 0x00000001; spsPath_ = value; onChanged(); return this; } /** * optional uint64 spsPath = 1; */ public Builder clearSpsPath() { bitField0_ = (bitField0_ & ~0x00000001); spsPath_ = 0L; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.namenode.GetNextSPSPathResponseProto) } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.GetNextSPSPathResponseProto) private static final org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto(); } public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser() { @java.lang.Override public GetNextSPSPathResponseProto parsePartialFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return new GetNextSPSPathResponseProto(input, extensionRegistry); } }; public static org.apache.hadoop.thirdparty.protobuf.Parser parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } /** *
   **
   * Protocol used by the subordinate namenode to send requests to
   * the active/primary namenode.
   * See the request and response messages for details of each RPC call.
   * 
* * Protobuf service {@code hadoop.hdfs.namenode.NamenodeProtocolService} */ public static abstract class NamenodeProtocolService implements org.apache.hadoop.thirdparty.protobuf.Service { protected NamenodeProtocolService() {} public interface Interface { /** *
       **
        * Get the list of blocks for a given datanode, with the block
        * lengths adding up to the given size.
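        *
        * A hedged request-building sketch; the field names come from
        * GetBlocksRequestProto in this file, while "datanodeId" is an
        * assumed, pre-built HdfsProtos.DatanodeIDProto:
        *
        *   GetBlocksRequestProto req = GetBlocksRequestProto.newBuilder()
        *       .setDatanode(datanodeId)       // required datanode = 1
        *       .setSize(64L * 1024 * 1024)    // required size = 2, in bytes
        *       .setMinBlockSize(10485760L)    // optional, defaults to 10 MB
        *       .build();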
       * 
* * rpc getBlocks(.hadoop.hdfs.namenode.GetBlocksRequestProto) returns (.hadoop.hdfs.namenode.GetBlocksResponseProto); */ public abstract void getBlocks( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
       **
       * Get the current block keys
       * 
* * rpc getBlockKeys(.hadoop.hdfs.namenode.GetBlockKeysRequestProto) returns (.hadoop.hdfs.namenode.GetBlockKeysResponseProto); */ public abstract void getBlockKeys( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
       **
       * Get the transaction ID of the most recently persisted editlog record
       * 
* * rpc getTransactionId(.hadoop.hdfs.namenode.GetTransactionIdRequestProto) returns (.hadoop.hdfs.namenode.GetTransactionIdResponseProto); */ public abstract void getTransactionId( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
       **
        * Get the transaction ID of the most recent checkpoint
       * 
* * rpc getMostRecentCheckpointTxId(.hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdRequestProto) returns (.hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdResponseProto); */ public abstract void getMostRecentCheckpointTxId( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
       **
       * Close the current editlog and open a new one for checkpointing purposes
       * 
* * rpc rollEditLog(.hadoop.hdfs.namenode.RollEditLogRequestProto) returns (.hadoop.hdfs.namenode.RollEditLogResponseProto); */ public abstract void rollEditLog( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
       **
       * Request info about the version running on this NameNode
       * 
* * rpc versionRequest(.hadoop.hdfs.VersionRequestProto) returns (.hadoop.hdfs.VersionResponseProto); */ public abstract void versionRequest( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
       **
        * Report an error from a subordinate namenode to the active namenode.
        * The active namenode may decide to unregister the reporting namenode
        * depending on the error.
       * 
* * rpc errorReport(.hadoop.hdfs.namenode.ErrorReportRequestProto) returns (.hadoop.hdfs.namenode.ErrorReportResponseProto); */ public abstract void errorReport( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
       **
        * Request to register a subordinate namenode
       * 
* * rpc registerSubordinateNamenode(.hadoop.hdfs.namenode.RegisterRequestProto) returns (.hadoop.hdfs.namenode.RegisterResponseProto); */ public abstract void registerSubordinateNamenode( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
       **
       * Request to start a checkpoint. 
       * 
* * rpc startCheckpoint(.hadoop.hdfs.namenode.StartCheckpointRequestProto) returns (.hadoop.hdfs.namenode.StartCheckpointResponseProto); */ public abstract void startCheckpoint( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
       **
        * End or finalize the previously started checkpoint
       * 
* * rpc endCheckpoint(.hadoop.hdfs.namenode.EndCheckpointRequestProto) returns (.hadoop.hdfs.namenode.EndCheckpointResponseProto); */ public abstract void endCheckpoint( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
       **
       * Get editlog manifests from the active namenode for all the editlogs
       * 
* * rpc getEditLogManifest(.hadoop.hdfs.namenode.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.namenode.GetEditLogManifestResponseProto); */ public abstract void getEditLogManifest( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
       **
        * Return whether the NameNode upgrade is finalized (true) or still in progress (false)
       * 
* * rpc isUpgradeFinalized(.hadoop.hdfs.namenode.IsUpgradeFinalizedRequestProto) returns (.hadoop.hdfs.namenode.IsUpgradeFinalizedResponseProto); */ public abstract void isUpgradeFinalized( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
       **
       * Return whether the NameNode is in rolling upgrade (true) or not (false).
       * 
* * rpc isRollingUpgrade(.hadoop.hdfs.namenode.IsRollingUpgradeRequestProto) returns (.hadoop.hdfs.namenode.IsRollingUpgradeResponseProto); */ public abstract void isRollingUpgrade( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
       **
        * Return the next storage policy satisfier (SPS) path from the namenode
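        *
        * A minimal sketch of reading the reply; hasSpsPath/getSpsPath are
        * defined on GetNextSPSPathResponseProto in this file, and "resp" is
        * an assumed response object:
        *
        *   if (resp.hasSpsPath()) {
        *     long spsPath = resp.getSpsPath(); // optional uint64 spsPath = 1
        *   }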
       * 
* * rpc getNextSPSPath(.hadoop.hdfs.namenode.GetNextSPSPathRequestProto) returns (.hadoop.hdfs.namenode.GetNextSPSPathResponseProto); */ public abstract void getNextSPSPath( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); } public static org.apache.hadoop.thirdparty.protobuf.Service newReflectiveService( final Interface impl) { return new NamenodeProtocolService() { @java.lang.Override public void getBlocks( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.getBlocks(controller, request, done); } @java.lang.Override public void getBlockKeys( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.getBlockKeys(controller, request, done); } @java.lang.Override public void getTransactionId( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.getTransactionId(controller, request, done); } @java.lang.Override public void getMostRecentCheckpointTxId( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.getMostRecentCheckpointTxId(controller, request, done); } @java.lang.Override public void rollEditLog( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.rollEditLog(controller, request, done); } @java.lang.Override public void versionRequest( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.versionRequest(controller, request, done); } @java.lang.Override public void errorReport( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.errorReport(controller, request, done); } @java.lang.Override public void registerSubordinateNamenode( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.registerSubordinateNamenode(controller, request, done); } @java.lang.Override public void startCheckpoint( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.startCheckpoint(controller, request, done); } @java.lang.Override public void endCheckpoint( org.apache.hadoop.thirdparty.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.endCheckpoint(controller, request, done); } @java.lang.Override public void getEditLogManifest( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.getEditLogManifest(controller, request, done); } @java.lang.Override public void isUpgradeFinalized( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.isUpgradeFinalized(controller, request, done); } @java.lang.Override public void isRollingUpgrade( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.isRollingUpgrade(controller, request, done); } @java.lang.Override public void getNextSPSPath( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { impl.getNextSPSPath(controller, request, done); } }; } public static org.apache.hadoop.thirdparty.protobuf.BlockingService newReflectiveBlockingService(final BlockingInterface impl) { return new org.apache.hadoop.thirdparty.protobuf.BlockingService() { public final org.apache.hadoop.thirdparty.protobuf.Descriptors.ServiceDescriptor getDescriptorForType() { return getDescriptor(); } public final org.apache.hadoop.thirdparty.protobuf.Message callBlockingMethod( org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method, org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.thirdparty.protobuf.Message request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.callBlockingMethod() given method descriptor for " + "wrong service type."); } switch(method.getIndex()) { case 0: return impl.getBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto)request); case 1: return impl.getBlockKeys(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto)request); case 2: return impl.getTransactionId(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto)request); case 3: return impl.getMostRecentCheckpointTxId(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto)request); case 4: return impl.rollEditLog(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto)request); case 5: return impl.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto)request); case 6: return impl.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto)request); case 7: return impl.registerSubordinateNamenode(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto)request); case 8: 
return impl.startCheckpoint(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto)request); case 9: return impl.endCheckpoint(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto)request); case 10: return impl.getEditLogManifest(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto)request); case 11: return impl.isUpgradeFinalized(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto)request); case 12: return impl.isRollingUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto)request); case 13: return impl.getNextSPSPath(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto)request); default: throw new java.lang.AssertionError("Can't get here."); } } public final org.apache.hadoop.thirdparty.protobuf.Message getRequestPrototype( org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getRequestPrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.getDefaultInstance(); case 1: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDefaultInstance(); case 2: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.getDefaultInstance(); case 3: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto.getDefaultInstance(); case 4: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.getDefaultInstance(); case 5: return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto.getDefaultInstance(); case 6: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance(); case 7: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDefaultInstance(); case 8: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDefaultInstance(); case 9: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDefaultInstance(); case 10: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance(); case 11: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto.getDefaultInstance(); case 12: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto.getDefaultInstance(); case 13: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } public final org.apache.hadoop.thirdparty.protobuf.Message getResponsePrototype( org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getResponsePrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance(); case 1: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance(); case 2: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance(); case 3: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto.getDefaultInstance(); case 4: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance(); case 5: return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.getDefaultInstance(); case 6: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(); case 7: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance(); case 8: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance(); case 9: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance(); case 10: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(); case 11: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto.getDefaultInstance(); case 12: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto.getDefaultInstance(); case 13: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } }; } /** *
     **
      * Get the list of blocks for a given datanode, with the block
      * lengths adding up to the given size.
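      *
      * A hedged sketch of the non-blocking call style; "channel" and "req"
      * are assumed to exist already, and the callback is the protobuf
      * RpcCallback interface used throughout this file:
      *
      *   NamenodeProtocolService.Stub stub =
      *       NamenodeProtocolService.newStub(channel);
      *   stub.getBlocks(null, req,
      *       new org.apache.hadoop.thirdparty.protobuf.RpcCallback<GetBlocksResponseProto>() {
      *         public void run(GetBlocksResponseProto response) {
      *           // handle the returned block list here
      *         }
      *       });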
     * 
* * rpc getBlocks(.hadoop.hdfs.namenode.GetBlocksRequestProto) returns (.hadoop.hdfs.namenode.GetBlocksResponseProto); */ public abstract void getBlocks( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
     **
     * Get the current block keys
     * 
* * rpc getBlockKeys(.hadoop.hdfs.namenode.GetBlockKeysRequestProto) returns (.hadoop.hdfs.namenode.GetBlockKeysResponseProto); */ public abstract void getBlockKeys( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
     **
     * Get the transaction ID of the most recently persisted editlog record
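      *
      * A minimal blocking sketch; the request message has no fields, so its
      * default instance suffices ("proxy" is an assumed BlockingInterface
      * obtained via newBlockingStub):
      *
      *   GetTransactionIdResponseProto resp = proxy.getTransactionId(
      *       null, GetTransactionIdRequestProto.getDefaultInstance());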
     * 
* * rpc getTransactionId(.hadoop.hdfs.namenode.GetTransactionIdRequestProto) returns (.hadoop.hdfs.namenode.GetTransactionIdResponseProto); */ public abstract void getTransactionId( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
     **
      * Get the transaction ID of the most recent checkpoint
     * 
* * rpc getMostRecentCheckpointTxId(.hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdRequestProto) returns (.hadoop.hdfs.namenode.GetMostRecentCheckpointTxIdResponseProto); */ public abstract void getMostRecentCheckpointTxId( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
     **
     * Close the current editlog and open a new one for checkpointing purposes
     * 
* * rpc rollEditLog(.hadoop.hdfs.namenode.RollEditLogRequestProto) returns (.hadoop.hdfs.namenode.RollEditLogResponseProto); */ public abstract void rollEditLog( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
     **
     * Request info about the version running on this NameNode
     * 
* * rpc versionRequest(.hadoop.hdfs.VersionRequestProto) returns (.hadoop.hdfs.VersionResponseProto); */ public abstract void versionRequest( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
     **
      * Report an error from a subordinate namenode to the active namenode.
      * The active namenode may decide to unregister the reporting namenode
      * depending on the error.
     * 
* * rpc errorReport(.hadoop.hdfs.namenode.ErrorReportRequestProto) returns (.hadoop.hdfs.namenode.ErrorReportResponseProto); */ public abstract void errorReport( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
     **
      * Request to register a subordinate namenode
     * 
* * rpc registerSubordinateNamenode(.hadoop.hdfs.namenode.RegisterRequestProto) returns (.hadoop.hdfs.namenode.RegisterResponseProto); */ public abstract void registerSubordinateNamenode( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
     **
     * Request to start a checkpoint. 
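      *
      * A hedged sketch of the checkpoint round trip; "startReq" and "endReq"
      * are assumed, pre-built request messages, and the pairing with
      * endCheckpoint mirrors the rpc declarations in this service:
      *
      *   StartCheckpointResponseProto started =
      *       proxy.startCheckpoint(null, startReq);
      *   // ... perform the checkpoint work, then:
      *   proxy.endCheckpoint(null, endReq);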
     * 
* * rpc startCheckpoint(.hadoop.hdfs.namenode.StartCheckpointRequestProto) returns (.hadoop.hdfs.namenode.StartCheckpointResponseProto); */ public abstract void startCheckpoint( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
     **
      * End or finalize the previously started checkpoint
     * 
* * rpc endCheckpoint(.hadoop.hdfs.namenode.EndCheckpointRequestProto) returns (.hadoop.hdfs.namenode.EndCheckpointResponseProto); */ public abstract void endCheckpoint( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
     **
     * Get editlog manifests from the active namenode for all the editlogs
     * 
* * rpc getEditLogManifest(.hadoop.hdfs.namenode.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.namenode.GetEditLogManifestResponseProto); */ public abstract void getEditLogManifest( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
     **
      * Return whether the NameNode upgrade is finalized (true) or still in progress (false)
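      *
      * A minimal blocking sketch; the request default instance is the same
      * prototype this service returns from getRequestPrototype ("proxy" is
      * an assumed BlockingInterface):
      *
      *   IsUpgradeFinalizedResponseProto resp = proxy.isUpgradeFinalized(
      *       null, IsUpgradeFinalizedRequestProto.getDefaultInstance());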
     * 
* * rpc isUpgradeFinalized(.hadoop.hdfs.namenode.IsUpgradeFinalizedRequestProto) returns (.hadoop.hdfs.namenode.IsUpgradeFinalizedResponseProto); */ public abstract void isUpgradeFinalized( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
     **
     * Return whether the NameNode is in rolling upgrade (true) or not (false).
     * 
* * rpc isRollingUpgrade(.hadoop.hdfs.namenode.IsRollingUpgradeRequestProto) returns (.hadoop.hdfs.namenode.IsRollingUpgradeResponseProto); */ public abstract void isRollingUpgrade( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); /** *
     **
      * Return the next storage policy satisfier (SPS) path from the namenode
     * 
* * rpc getNextSPSPath(.hadoop.hdfs.namenode.GetNextSPSPathRequestProto) returns (.hadoop.hdfs.namenode.GetNextSPSPathResponseProto); */ public abstract void getNextSPSPath( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done); public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.ServiceDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.getDescriptor().getServices().get(0); } public final org.apache.hadoop.thirdparty.protobuf.Descriptors.ServiceDescriptor getDescriptorForType() { return getDescriptor(); } public final void callMethod( org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method, org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.thirdparty.protobuf.Message request, org.apache.hadoop.thirdparty.protobuf.RpcCallback< org.apache.hadoop.thirdparty.protobuf.Message> done) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.callMethod() given method descriptor for wrong " + "service type."); } switch(method.getIndex()) { case 0: this.getBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; case 1: this.getBlockKeys(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; case 2: this.getTransactionId(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; case 3: this.getMostRecentCheckpointTxId(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; case 4: this.rollEditLog(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; case 5: this.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; case 6: this.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; case 7: this.registerSubordinateNamenode(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; case 8: this.startCheckpoint(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; case 9: this.endCheckpoint(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; case 10: this.getEditLogManifest(controller, 
(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; case 11: this.isUpgradeFinalized(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; case 12: this.isRollingUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; case 13: this.getNextSPSPath(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto)request, org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback( done)); return; default: throw new java.lang.AssertionError("Can't get here."); } } public final org.apache.hadoop.thirdparty.protobuf.Message getRequestPrototype( org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getRequestPrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.getDefaultInstance(); case 1: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDefaultInstance(); case 2: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.getDefaultInstance(); case 3: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto.getDefaultInstance(); case 4: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.getDefaultInstance(); case 5: return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto.getDefaultInstance(); case 6: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance(); case 7: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDefaultInstance(); case 8: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDefaultInstance(); case 9: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDefaultInstance(); case 10: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance(); case 11: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto.getDefaultInstance(); case 12: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto.getDefaultInstance(); case 13: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } public final org.apache.hadoop.thirdparty.protobuf.Message getResponsePrototype( org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getResponsePrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance(); case 1: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance(); case 2: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance(); case 3: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto.getDefaultInstance(); case 4: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance(); case 5: return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.getDefaultInstance(); case 6: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(); case 7: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance(); case 8: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance(); case 9: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance(); case 10: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(); case 11: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto.getDefaultInstance(); case 12: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto.getDefaultInstance(); case 13: return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } public static Stub newStub( org.apache.hadoop.thirdparty.protobuf.RpcChannel channel) { return new Stub(channel); } public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService implements Interface { private Stub(org.apache.hadoop.thirdparty.protobuf.RpcChannel channel) { this.channel = channel; } private final org.apache.hadoop.thirdparty.protobuf.RpcChannel channel; public org.apache.hadoop.thirdparty.protobuf.RpcChannel getChannel() { return channel; } public void getBlocks( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(0), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance(), org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance())); } public void getBlockKeys( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(1), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance(), org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance())); } public void getTransactionId( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(2), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance(), org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance())); } public void getMostRecentCheckpointTxId( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(3), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto.getDefaultInstance(), org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto.getDefaultInstance())); } public void rollEditLog( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(4), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance(), org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance())); } public void versionRequest( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.getDefaultInstance(), org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.getDefaultInstance())); } public void errorReport( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(6), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(), 
org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance())); } public void registerSubordinateNamenode( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance(), org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance())); } public void startCheckpoint( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance(), org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance())); } public void endCheckpoint( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance(), org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance())); } public void getEditLogManifest( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(), org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance())); } public void isUpgradeFinalized( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(11), controller, request, 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto.getDefaultInstance(), org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto.getDefaultInstance())); } public void isRollingUpgrade( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto.getDefaultInstance(), org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto.getDefaultInstance())); } public void getNextSPSPath( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto request, org.apache.hadoop.thirdparty.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto.getDefaultInstance(), org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto.getDefaultInstance())); } } public static BlockingInterface newBlockingStub( org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel channel) { return new BlockingStub(channel); } public interface BlockingInterface { public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto getBlocks( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto getBlockKeys( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto getTransactionId( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto getMostRecentCheckpointTxId( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto rollEditLog( 
org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto versionRequest( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto errorReport( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto registerSubordinateNamenode( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto startCheckpoint( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto endCheckpoint( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto isUpgradeFinalized( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto isRollingUpgrade( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto getNextSPSPath( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { private BlockingStub(org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel channel) { this.channel = channel; } private final org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel channel; public 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto getBlocks( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(0), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto getBlockKeys( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(1), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto getTransactionId( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(2), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto getMostRecentCheckpointTxId( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(3), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto rollEditLog( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(4), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto versionRequest( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return 
(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto errorReport( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(6), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto registerSubordinateNamenode( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto startCheckpoint( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto endCheckpoint( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto isUpgradeFinalized( 
org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto isRollingUpgrade( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto getNextSPSPath( org.apache.hadoop.thirdparty.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto request) throws org.apache.hadoop.thirdparty.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto.getDefaultInstance()); } } // @@protoc_insertion_point(class_scope:hadoop.hdfs.namenode.NamenodeProtocolService) } private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_GetBlocksRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_GetBlocksRequestProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_GetBlocksResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_GetBlocksResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_GetBlockKeysRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_GetBlockKeysRequestProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_GetBlockKeysResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_GetBlockKeysResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_GetTransactionIdRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable 
internal_static_hadoop_hdfs_namenode_GetTransactionIdRequestProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_GetTransactionIdResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_GetTransactionIdResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_RollEditLogRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_RollEditLogRequestProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_RollEditLogResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_RollEditLogResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdRequestProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_ErrorReportRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_ErrorReportRequestProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_ErrorReportResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_ErrorReportResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_RegisterRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_RegisterRequestProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_RegisterResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_RegisterResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_StartCheckpointRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_StartCheckpointRequestProto_fieldAccessorTable; private static final 
org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_StartCheckpointResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_StartCheckpointResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_EndCheckpointRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_EndCheckpointRequestProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_EndCheckpointResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_EndCheckpointResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_GetEditLogManifestRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_GetEditLogManifestRequestProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_GetEditLogManifestResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_GetEditLogManifestResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedRequestProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_IsRollingUpgradeRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_IsRollingUpgradeRequestProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_IsRollingUpgradeResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_IsRollingUpgradeResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_GetFilePathRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_GetFilePathRequestProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor 
internal_static_hadoop_hdfs_namenode_GetFilePathResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_GetFilePathResponseProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_GetNextSPSPathRequestProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_GetNextSPSPathRequestProto_fieldAccessorTable; private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_namenode_GetNextSPSPathResponseProto_descriptor; private static final org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hadoop_hdfs_namenode_GetNextSPSPathResponseProto_fieldAccessorTable; public static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } private static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { "\n\026NamenodeProtocol.proto\022\024hadoop.hdfs.na" + "menode\032\nhdfs.proto\032\020HdfsServer.proto\"u\n\025" + "GetBlocksRequestProto\022.\n\010datanode\030\001 \002(\0132" + "\034.hadoop.hdfs.DatanodeIDProto\022\014\n\004size\030\002 " + "\002(\004\022\036\n\014minBlockSize\030\003 \001(\004:\01010485760\"O\n\026G" + "etBlocksResponseProto\0225\n\006blocks\030\001 \002(\0132%." + "hadoop.hdfs.BlocksWithLocationsProto\"\032\n\030" + "GetBlockKeysRequestProto\"N\n\031GetBlockKeys" + "ResponseProto\0221\n\004keys\030\001 \001(\0132#.hadoop.hdf" + "s.ExportedBlockKeysProto\"\036\n\034GetTransacti" + "onIdRequestProto\"-\n\035GetTransactionIdResp" + "onseProto\022\014\n\004txId\030\001 \002(\004\"\031\n\027RollEditLogRe" + "questProto\"T\n\030RollEditLogResponseProto\0228" + "\n\tsignature\030\001 \002(\0132%.hadoop.hdfs.Checkpoi" + "ntSignatureProto\")\n\'GetMostRecentCheckpo" + "intTxIdRequestProto\"8\n(GetMostRecentChec" + "kpointTxIdResponseProto\022\014\n\004txId\030\001 \002(\004\"w\n" + "\027ErrorReportRequestProto\022<\n\014registration" + "\030\001 \002(\0132&.hadoop.hdfs.NamenodeRegistratio" + "nProto\022\021\n\terrorCode\030\002 \002(\r\022\013\n\003msg\030\003 \002(\t\"\032" + "\n\030ErrorReportResponseProto\"T\n\024RegisterRe" + "questProto\022<\n\014registration\030\001 \002(\0132&.hadoo" + "p.hdfs.NamenodeRegistrationProto\"U\n\025Regi" + "sterResponseProto\022<\n\014registration\030\001 \002(\0132" + "&.hadoop.hdfs.NamenodeRegistrationProto\"" + "[\n\033StartCheckpointRequestProto\022<\n\014regist" + "ration\030\001 \002(\0132&.hadoop.hdfs.NamenodeRegis" + "trationProto\"R\n\034StartCheckpointResponseP" + "roto\0222\n\007command\030\001 \002(\0132!.hadoop.hdfs.Name" + "nodeCommandProto\"\223\001\n\031EndCheckpointReques" + "tProto\022<\n\014registration\030\001 \002(\0132&.hadoop.hd" + "fs.NamenodeRegistrationProto\0228\n\tsignatur" + "e\030\002 \002(\0132%.hadoop.hdfs.CheckpointSignatur" + "eProto\"\034\n\032EndCheckpointResponseProto\"3\n\036" + "GetEditLogManifestRequestProto\022\021\n\tsinceT" + "xId\030\001 \002(\004\"\\\n\037GetEditLogManifestResponseP" + "roto\0229\n\010manifest\030\001 \002(\0132\'.hadoop.hdfs.Rem" + "oteEditLogManifestProto\" \n\036IsUpgradeFina" + "lizedRequestProto\"=\n\037IsUpgradeFinalizedR" + "esponseProto\022\032\n\022isUpgradeFinalized\030\001 \002(\010" + 
"\"\036\n\034IsRollingUpgradeRequestProto\"9\n\035IsRo" + "llingUpgradeResponseProto\022\030\n\020isRollingUp" + "grade\030\001 \002(\010\")\n\027GetFilePathRequestProto\022\016" + "\n\006fileId\030\001 \002(\004\"+\n\030GetFilePathResponsePro" + "to\022\017\n\007srcPath\030\001 \002(\t\"\034\n\032GetNextSPSPathReq" + "uestProto\".\n\033GetNextSPSPathResponseProto" + "\022\017\n\007spsPath\030\001 \001(\0042\243\r\n\027NamenodeProtocolSe" + "rvice\022f\n\tgetBlocks\022+.hadoop.hdfs.namenod" + "e.GetBlocksRequestProto\032,.hadoop.hdfs.na" + "menode.GetBlocksResponseProto\022o\n\014getBloc" + "kKeys\022..hadoop.hdfs.namenode.GetBlockKey" + "sRequestProto\032/.hadoop.hdfs.namenode.Get" + "BlockKeysResponseProto\022{\n\020getTransaction" + "Id\0222.hadoop.hdfs.namenode.GetTransaction" + "IdRequestProto\0323.hadoop.hdfs.namenode.Ge" + "tTransactionIdResponseProto\022\234\001\n\033getMostR" + "ecentCheckpointTxId\022=.hadoop.hdfs.nameno" + "de.GetMostRecentCheckpointTxIdRequestPro" + "to\032>.hadoop.hdfs.namenode.GetMostRecentC" + "heckpointTxIdResponseProto\022l\n\013rollEditLo" + "g\022-.hadoop.hdfs.namenode.RollEditLogRequ" + "estProto\032..hadoop.hdfs.namenode.RollEdit" + "LogResponseProto\022U\n\016versionRequest\022 .had" + "oop.hdfs.VersionRequestProto\032!.hadoop.hd" + "fs.VersionResponseProto\022l\n\013errorReport\022-" + ".hadoop.hdfs.namenode.ErrorReportRequest" + "Proto\032..hadoop.hdfs.namenode.ErrorReport" + "ResponseProto\022v\n\033registerSubordinateName" + "node\022*.hadoop.hdfs.namenode.RegisterRequ" + "estProto\032+.hadoop.hdfs.namenode.Register" + "ResponseProto\022x\n\017startCheckpoint\0221.hadoo" + "p.hdfs.namenode.StartCheckpointRequestPr" + "oto\0322.hadoop.hdfs.namenode.StartCheckpoi" + "ntResponseProto\022r\n\rendCheckpoint\022/.hadoo" + "p.hdfs.namenode.EndCheckpointRequestProt" + "o\0320.hadoop.hdfs.namenode.EndCheckpointRe" + "sponseProto\022\201\001\n\022getEditLogManifest\0224.had" + "oop.hdfs.namenode.GetEditLogManifestRequ" + "estProto\0325.hadoop.hdfs.namenode.GetEditL" + "ogManifestResponseProto\022\201\001\n\022isUpgradeFin" + "alized\0224.hadoop.hdfs.namenode.IsUpgradeF" + "inalizedRequestProto\0325.hadoop.hdfs.namen" + "ode.IsUpgradeFinalizedResponseProto\022{\n\020i" + "sRollingUpgrade\0222.hadoop.hdfs.namenode.I" + "sRollingUpgradeRequestProto\0323.hadoop.hdf" + "s.namenode.IsRollingUpgradeResponseProto" + "\022u\n\016getNextSPSPath\0220.hadoop.hdfs.namenod" + "e.GetNextSPSPathRequestProto\0321.hadoop.hd" + "fs.namenode.GetNextSPSPathResponseProtoB" + "E\n%org.apache.hadoop.hdfs.protocol.proto" + "B\026NamenodeProtocolProtos\210\001\001\240\001\001" }; org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { public org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry assignDescriptors( org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor root) { descriptor = root; return null; } }; org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(), org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.getDescriptor(), }, assigner); internal_static_hadoop_hdfs_namenode_GetBlocksRequestProto_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_hadoop_hdfs_namenode_GetBlocksRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_GetBlocksRequestProto_descriptor, new java.lang.String[] { "Datanode", "Size", "MinBlockSize", }); internal_static_hadoop_hdfs_namenode_GetBlocksResponseProto_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_hadoop_hdfs_namenode_GetBlocksResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_GetBlocksResponseProto_descriptor, new java.lang.String[] { "Blocks", }); internal_static_hadoop_hdfs_namenode_GetBlockKeysRequestProto_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_hadoop_hdfs_namenode_GetBlockKeysRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_GetBlockKeysRequestProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_namenode_GetBlockKeysResponseProto_descriptor = getDescriptor().getMessageTypes().get(3); internal_static_hadoop_hdfs_namenode_GetBlockKeysResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_GetBlockKeysResponseProto_descriptor, new java.lang.String[] { "Keys", }); internal_static_hadoop_hdfs_namenode_GetTransactionIdRequestProto_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_hadoop_hdfs_namenode_GetTransactionIdRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_GetTransactionIdRequestProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_namenode_GetTransactionIdResponseProto_descriptor = getDescriptor().getMessageTypes().get(5); internal_static_hadoop_hdfs_namenode_GetTransactionIdResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_GetTransactionIdResponseProto_descriptor, new java.lang.String[] { "TxId", }); internal_static_hadoop_hdfs_namenode_RollEditLogRequestProto_descriptor = getDescriptor().getMessageTypes().get(6); internal_static_hadoop_hdfs_namenode_RollEditLogRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_RollEditLogRequestProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_namenode_RollEditLogResponseProto_descriptor = getDescriptor().getMessageTypes().get(7); internal_static_hadoop_hdfs_namenode_RollEditLogResponseProto_fieldAccessorTable = new 
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_RollEditLogResponseProto_descriptor, new java.lang.String[] { "Signature", }); internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdRequestProto_descriptor = getDescriptor().getMessageTypes().get(8); internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdRequestProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdResponseProto_descriptor = getDescriptor().getMessageTypes().get(9); internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_GetMostRecentCheckpointTxIdResponseProto_descriptor, new java.lang.String[] { "TxId", }); internal_static_hadoop_hdfs_namenode_ErrorReportRequestProto_descriptor = getDescriptor().getMessageTypes().get(10); internal_static_hadoop_hdfs_namenode_ErrorReportRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_ErrorReportRequestProto_descriptor, new java.lang.String[] { "Registration", "ErrorCode", "Msg", }); internal_static_hadoop_hdfs_namenode_ErrorReportResponseProto_descriptor = getDescriptor().getMessageTypes().get(11); internal_static_hadoop_hdfs_namenode_ErrorReportResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_ErrorReportResponseProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_namenode_RegisterRequestProto_descriptor = getDescriptor().getMessageTypes().get(12); internal_static_hadoop_hdfs_namenode_RegisterRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_RegisterRequestProto_descriptor, new java.lang.String[] { "Registration", }); internal_static_hadoop_hdfs_namenode_RegisterResponseProto_descriptor = getDescriptor().getMessageTypes().get(13); internal_static_hadoop_hdfs_namenode_RegisterResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_RegisterResponseProto_descriptor, new java.lang.String[] { "Registration", }); internal_static_hadoop_hdfs_namenode_StartCheckpointRequestProto_descriptor = getDescriptor().getMessageTypes().get(14); internal_static_hadoop_hdfs_namenode_StartCheckpointRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_StartCheckpointRequestProto_descriptor, new java.lang.String[] { "Registration", }); internal_static_hadoop_hdfs_namenode_StartCheckpointResponseProto_descriptor = getDescriptor().getMessageTypes().get(15); internal_static_hadoop_hdfs_namenode_StartCheckpointResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_StartCheckpointResponseProto_descriptor, new java.lang.String[] { "Command", }); internal_static_hadoop_hdfs_namenode_EndCheckpointRequestProto_descriptor = 
getDescriptor().getMessageTypes().get(16); internal_static_hadoop_hdfs_namenode_EndCheckpointRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_EndCheckpointRequestProto_descriptor, new java.lang.String[] { "Registration", "Signature", }); internal_static_hadoop_hdfs_namenode_EndCheckpointResponseProto_descriptor = getDescriptor().getMessageTypes().get(17); internal_static_hadoop_hdfs_namenode_EndCheckpointResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_EndCheckpointResponseProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_namenode_GetEditLogManifestRequestProto_descriptor = getDescriptor().getMessageTypes().get(18); internal_static_hadoop_hdfs_namenode_GetEditLogManifestRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_GetEditLogManifestRequestProto_descriptor, new java.lang.String[] { "SinceTxId", }); internal_static_hadoop_hdfs_namenode_GetEditLogManifestResponseProto_descriptor = getDescriptor().getMessageTypes().get(19); internal_static_hadoop_hdfs_namenode_GetEditLogManifestResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_GetEditLogManifestResponseProto_descriptor, new java.lang.String[] { "Manifest", }); internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedRequestProto_descriptor = getDescriptor().getMessageTypes().get(20); internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedRequestProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedResponseProto_descriptor = getDescriptor().getMessageTypes().get(21); internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_IsUpgradeFinalizedResponseProto_descriptor, new java.lang.String[] { "IsUpgradeFinalized", }); internal_static_hadoop_hdfs_namenode_IsRollingUpgradeRequestProto_descriptor = getDescriptor().getMessageTypes().get(22); internal_static_hadoop_hdfs_namenode_IsRollingUpgradeRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_IsRollingUpgradeRequestProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_namenode_IsRollingUpgradeResponseProto_descriptor = getDescriptor().getMessageTypes().get(23); internal_static_hadoop_hdfs_namenode_IsRollingUpgradeResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_IsRollingUpgradeResponseProto_descriptor, new java.lang.String[] { "IsRollingUpgrade", }); internal_static_hadoop_hdfs_namenode_GetFilePathRequestProto_descriptor = getDescriptor().getMessageTypes().get(24); internal_static_hadoop_hdfs_namenode_GetFilePathRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( 
internal_static_hadoop_hdfs_namenode_GetFilePathRequestProto_descriptor, new java.lang.String[] { "FileId", }); internal_static_hadoop_hdfs_namenode_GetFilePathResponseProto_descriptor = getDescriptor().getMessageTypes().get(25); internal_static_hadoop_hdfs_namenode_GetFilePathResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_GetFilePathResponseProto_descriptor, new java.lang.String[] { "SrcPath", }); internal_static_hadoop_hdfs_namenode_GetNextSPSPathRequestProto_descriptor = getDescriptor().getMessageTypes().get(26); internal_static_hadoop_hdfs_namenode_GetNextSPSPathRequestProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_GetNextSPSPathRequestProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_namenode_GetNextSPSPathResponseProto_descriptor = getDescriptor().getMessageTypes().get(27); internal_static_hadoop_hdfs_namenode_GetNextSPSPathResponseProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_namenode_GetNextSPSPathResponseProto_descriptor, new java.lang.String[] { "SpsPath", }); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(); org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.getDescriptor(); } // @@protoc_insertion_point(outer_class_scope) }
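For orientation, the BlockingStub methods above all funnel into channel.callBlockingMethod with a method index into the service descriptor. The following is a minimal sketch of driving that stub, assuming a BlockingRpcChannel and RpcController have already been obtained from Hadoop's RPC machinery (within Hadoop this protocol is normally reached through NamenodeProtocolTranslatorPB rather than a hand-built channel); RollingUpgradeProbe and fetchRollingUpgradeStatus are hypothetical names.

// Sketch only: the channel/controller plumbing (e.g. Hadoop's protobuf RPC
// engine) is assumed and not shown here.
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
import org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel;
import org.apache.hadoop.thirdparty.protobuf.RpcController;
import org.apache.hadoop.thirdparty.protobuf.ServiceException;

public final class RollingUpgradeProbe {
  private RollingUpgradeProbe() {}

  // Asks the NameNode whether a rolling upgrade is in progress; this resolves
  // to callBlockingMethod on method index 12, as in the generated stub above.
  static boolean fetchRollingUpgradeStatus(BlockingRpcChannel channel,
                                           RpcController controller)
      throws ServiceException {
    NamenodeProtocolService.BlockingInterface stub =
        NamenodeProtocolService.newBlockingStub(channel);
    // IsRollingUpgradeRequestProto declares no fields, so the default
    // instance is already a complete request.
    return stub.isRollingUpgrade(
            controller, IsRollingUpgradeRequestProto.getDefaultInstance())
        .getIsRollingUpgrade();
  }
}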

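The descriptorData string in the static block is the serialized FileDescriptorProto for NamenodeProtocol.proto; internalBuildGeneratedFileFrom re-parses it at class-load time against its two declared dependencies, hdfs.proto and HdfsServer.proto. A short sketch of inspecting the result at runtime:

// Sketch: enumerate the message types wired up above (indices 0 through 27)
// and the service's RPC methods, whose order backs getMethods().get(n) in
// the stub code (versionRequest, for instance, sits at index 5).
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos;
import org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor;
import org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor;
import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor;

public final class DescriptorDump {
  public static void main(String[] args) {
    FileDescriptor file = NamenodeProtocolProtos.getDescriptor();
    for (Descriptor message : file.getMessageTypes()) {
      System.out.println("message " + message.getFullName());
    }
    for (MethodDescriptor method : file.getServices().get(0).getMethods()) {
      System.out.println("rpc " + method.getName());
    }
  }
}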

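The FieldAccessorTable entries pair each descriptor with the camel-cased field names ("Datanode", "Size", "MinBlockSize", and so on) that back the generated accessors. One observable consequence is optional-field default handling: the serialized descriptor declares minBlockSize on GetBlocksRequestProto as an optional uint64 with a default of 10485760 bytes (10 MB). A hedged sketch:

// Sketch: default semantics of the optional minBlockSize field.
// buildPartial() sidesteps the required datanode/size fields purely for
// illustration; a real request must set both.
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;

public final class MinBlockSizeDefaultDemo {
  public static void main(String[] args) {
    GetBlocksRequestProto unset = GetBlocksRequestProto.newBuilder().buildPartial();
    System.out.println(unset.hasMinBlockSize());  // false: never explicitly set
    System.out.println(unset.getMinBlockSize());  // 10485760: the declared default

    GetBlocksRequestProto overridden = GetBlocksRequestProto.newBuilder()
        .setMinBlockSize(1L)  // arbitrary value, only to show the override
        .buildPartial();
    System.out.println(overridden.hasMinBlockSize());  // true
    System.out.println(overridden.getMinBlockSize());  // 1
  }
}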