// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: QJournalProtocol.proto

package org.apache.hadoop.hdfs.qjournal.protocol;

public final class QJournalProtocolProtos {
  private QJournalProtocolProtos() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public interface JournalIdProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required string identifier = 1;
    boolean hasIdentifier();
    String getIdentifier();
  }
  public static final class JournalIdProto extends
      com.google.protobuf.GeneratedMessage
      implements JournalIdProtoOrBuilder {
    // Use JournalIdProto.newBuilder() to construct.
    private JournalIdProto(Builder builder) {
      super(builder);
    }
    private JournalIdProto(boolean noInit) {}
    
    private static final JournalIdProto defaultInstance;
    public static JournalIdProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public JournalIdProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_JournalIdProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_JournalIdProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required string identifier = 1;
    public static final int IDENTIFIER_FIELD_NUMBER = 1;
    private java.lang.Object identifier_;
    public boolean hasIdentifier() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public String getIdentifier() {
      java.lang.Object ref = identifier_;
      if (ref instanceof String) {
        return (String) ref;
      } else {
        com.google.protobuf.ByteString bs = 
            (com.google.protobuf.ByteString) ref;
        String s = bs.toStringUtf8();
        if (com.google.protobuf.Internal.isValidUtf8(bs)) {
          identifier_ = s;
        }
        return s;
      }
    }
    private com.google.protobuf.ByteString getIdentifierBytes() {
      java.lang.Object ref = identifier_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b = 
            com.google.protobuf.ByteString.copyFromUtf8((String) ref);
        identifier_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    
    private void initFields() {
      identifier_ = "";
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasIdentifier()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeBytes(1, getIdentifierBytes());
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(1, getIdentifierBytes());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) obj;
      
      boolean result = true;
      result = result && (hasIdentifier() == other.hasIdentifier());
      if (hasIdentifier()) {
        result = result && getIdentifier()
            .equals(other.getIdentifier());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasIdentifier()) {
        hash = (37 * hash) + IDENTIFIER_FIELD_NUMBER;
        hash = (53 * hash) + getIdentifier().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
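    // Editor's note, not generated by protoc: a minimal sketch of draining a
    // stream of length-prefixed messages. parseDelimitedFrom (above) returns
    // null on clean end-of-stream, so the loop below reads until EOF. It
    // assumes the messages were written with the standard
    // MessageLite.writeDelimitedTo(OutputStream).
    private static java.util.List<JournalIdProto> readAllDelimited(
        java.io.InputStream input) throws java.io.IOException {
      java.util.List<JournalIdProto> messages =
          new java.util.ArrayList<JournalIdProto>();
      JournalIdProto message;
      while ((message = parseDelimitedFrom(input)) != null) {
        messages.add(message);
      }
      return messages;
    }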
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_JournalIdProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_JournalIdProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        identifier_ = "";
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.identifier_ = identifier_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) return this;
        if (other.hasIdentifier()) {
          setIdentifier(other.getIdentifier());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasIdentifier()) {
          
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 10: {
              bitField0_ |= 0x00000001;
              identifier_ = input.readBytes();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required string identifier = 1;
      private java.lang.Object identifier_ = "";
      public boolean hasIdentifier() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public String getIdentifier() {
        java.lang.Object ref = identifier_;
        if (!(ref instanceof String)) {
          String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
          identifier_ = s;
          return s;
        } else {
          return (String) ref;
        }
      }
      public Builder setIdentifier(String value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        identifier_ = value;
        onChanged();
        return this;
      }
      public Builder clearIdentifier() {
        bitField0_ = (bitField0_ & ~0x00000001);
        identifier_ = getDefaultInstance().getIdentifier();
        onChanged();
        return this;
      }
      void setIdentifier(com.google.protobuf.ByteString value) {
        bitField0_ |= 0x00000001;
        identifier_ = value;
        onChanged();
      }
      
      // @@protoc_insertion_point(builder_scope:JournalIdProto)
    }
    
    static {
      defaultInstance = new JournalIdProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:JournalIdProto)
  }
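  
  // Editor's note, not generated by protoc: a minimal round-trip sketch for
  // JournalIdProto, assuming the protobuf-java 2.4.x API this file targets.
  // build() throws UninitializedMessageException if the required identifier
  // field is unset; the parsed copy is equal to the original.
  private static JournalIdProto exampleJournalIdRoundTrip()
      throws com.google.protobuf.InvalidProtocolBufferException {
    JournalIdProto original = JournalIdProto.newBuilder()
        .setIdentifier("my-journal")  // hypothetical journal name
        .build();
    com.google.protobuf.ByteString bytes = original.toByteString();
    return JournalIdProto.parseFrom(bytes);
  }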
  
  public interface RequestInfoProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .JournalIdProto journalId = 1;
    boolean hasJournalId();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder();
    
    // required uint64 epoch = 2;
    boolean hasEpoch();
    long getEpoch();
    
    // required uint64 ipcSerialNumber = 3;
    boolean hasIpcSerialNumber();
    long getIpcSerialNumber();
    
    // optional uint64 committedTxId = 4;
    boolean hasCommittedTxId();
    long getCommittedTxId();
  }
  public static final class RequestInfoProto extends
      com.google.protobuf.GeneratedMessage
      implements RequestInfoProtoOrBuilder {
    // Use RequestInfoProto.newBuilder() to construct.
    private RequestInfoProto(Builder builder) {
      super(builder);
    }
    private RequestInfoProto(boolean noInit) {}
    
    private static final RequestInfoProto defaultInstance;
    public static RequestInfoProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public RequestInfoProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_RequestInfoProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_RequestInfoProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .JournalIdProto journalId = 1;
    public static final int JOURNALID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_;
    public boolean hasJournalId() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
      return journalId_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
      return journalId_;
    }
    
    // required uint64 epoch = 2;
    public static final int EPOCH_FIELD_NUMBER = 2;
    private long epoch_;
    public boolean hasEpoch() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public long getEpoch() {
      return epoch_;
    }
    
    // required uint64 ipcSerialNumber = 3;
    public static final int IPCSERIALNUMBER_FIELD_NUMBER = 3;
    private long ipcSerialNumber_;
    public boolean hasIpcSerialNumber() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    public long getIpcSerialNumber() {
      return ipcSerialNumber_;
    }
    
    // optional uint64 committedTxId = 4;
    public static final int COMMITTEDTXID_FIELD_NUMBER = 4;
    private long committedTxId_;
    public boolean hasCommittedTxId() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    public long getCommittedTxId() {
      return committedTxId_;
    }
    
    private void initFields() {
      journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      epoch_ = 0L;
      ipcSerialNumber_ = 0L;
      committedTxId_ = 0L;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasJournalId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIpcSerialNumber()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJournalId().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, journalId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, epoch_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, ipcSerialNumber_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeUInt64(4, committedTxId_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, journalId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, epoch_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(3, ipcSerialNumber_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(4, committedTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) obj;
      
      boolean result = true;
      result = result && (hasJournalId() == other.hasJournalId());
      if (hasJournalId()) {
        result = result && getJournalId()
            .equals(other.getJournalId());
      }
      result = result && (hasEpoch() == other.hasEpoch());
      if (hasEpoch()) {
        result = result && (getEpoch()
            == other.getEpoch());
      }
      result = result && (hasIpcSerialNumber() == other.hasIpcSerialNumber());
      if (hasIpcSerialNumber()) {
        result = result && (getIpcSerialNumber()
            == other.getIpcSerialNumber());
      }
      result = result && (hasCommittedTxId() == other.hasCommittedTxId());
      if (hasCommittedTxId()) {
        result = result && (getCommittedTxId()
            == other.getCommittedTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJournalId()) {
        hash = (37 * hash) + JOURNALID_FIELD_NUMBER;
        hash = (53 * hash) + getJournalId().hashCode();
      }
      if (hasEpoch()) {
        hash = (37 * hash) + EPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getEpoch());
      }
      if (hasIpcSerialNumber()) {
        hash = (37 * hash) + IPCSERIALNUMBER_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getIpcSerialNumber());
      }
      if (hasCommittedTxId()) {
        hash = (37 * hash) + COMMITTEDTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getCommittedTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_RequestInfoProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_RequestInfoProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJournalIdFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (journalIdBuilder_ == null) {
          journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          journalIdBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        epoch_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        ipcSerialNumber_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000004);
        committedTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000008);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (journalIdBuilder_ == null) {
          result.journalId_ = journalId_;
        } else {
          result.journalId_ = journalIdBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.epoch_ = epoch_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.ipcSerialNumber_ = ipcSerialNumber_;
        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
          to_bitField0_ |= 0x00000008;
        }
        result.committedTxId_ = committedTxId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) return this;
        if (other.hasJournalId()) {
          mergeJournalId(other.getJournalId());
        }
        if (other.hasEpoch()) {
          setEpoch(other.getEpoch());
        }
        if (other.hasIpcSerialNumber()) {
          setIpcSerialNumber(other.getIpcSerialNumber());
        }
        if (other.hasCommittedTxId()) {
          setCommittedTxId(other.getCommittedTxId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasJournalId()) {
          
          return false;
        }
        if (!hasEpoch()) {
          
          return false;
        }
        if (!hasIpcSerialNumber()) {
          
          return false;
        }
        if (!getJournalId().isInitialized()) {
          
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder();
              if (hasJournalId()) {
                subBuilder.mergeFrom(getJournalId());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setJournalId(subBuilder.buildPartial());
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              epoch_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              ipcSerialNumber_ = input.readUInt64();
              break;
            }
            case 32: {
              bitField0_ |= 0x00000008;
              committedTxId_ = input.readUInt64();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required .JournalIdProto journalId = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> journalIdBuilder_;
      public boolean hasJournalId() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
        if (journalIdBuilder_ == null) {
          return journalId_;
        } else {
          return journalIdBuilder_.getMessage();
        }
      }
      public Builder setJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (journalIdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          journalId_ = value;
          onChanged();
        } else {
          journalIdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setJournalId(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
        if (journalIdBuilder_ == null) {
          journalId_ = builderForValue.build();
          onChanged();
        } else {
          journalIdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (journalIdBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              journalId_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
            journalId_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(journalId_).mergeFrom(value).buildPartial();
          } else {
            journalId_ = value;
          }
          onChanged();
        } else {
          journalIdBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearJournalId() {
        if (journalIdBuilder_ == null) {
          journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
          onChanged();
        } else {
          journalIdBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJournalIdBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getJournalIdFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
        if (journalIdBuilder_ != null) {
          return journalIdBuilder_.getMessageOrBuilder();
        } else {
          return journalId_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
          getJournalIdFieldBuilder() {
        if (journalIdBuilder_ == null) {
          journalIdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
                  journalId_,
                  getParentForChildren(),
                  isClean());
          journalId_ = null;
        }
        return journalIdBuilder_;
      }
      
      // required uint64 epoch = 2;
      private long epoch_ ;
      public boolean hasEpoch() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public long getEpoch() {
        return epoch_;
      }
      public Builder setEpoch(long value) {
        bitField0_ |= 0x00000002;
        epoch_ = value;
        onChanged();
        return this;
      }
      public Builder clearEpoch() {
        bitField0_ = (bitField0_ & ~0x00000002);
        epoch_ = 0L;
        onChanged();
        return this;
      }
      
      // required uint64 ipcSerialNumber = 3;
      private long ipcSerialNumber_ ;
      public boolean hasIpcSerialNumber() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      public long getIpcSerialNumber() {
        return ipcSerialNumber_;
      }
      public Builder setIpcSerialNumber(long value) {
        bitField0_ |= 0x00000004;
        ipcSerialNumber_ = value;
        onChanged();
        return this;
      }
      public Builder clearIpcSerialNumber() {
        bitField0_ = (bitField0_ & ~0x00000004);
        ipcSerialNumber_ = 0L;
        onChanged();
        return this;
      }
      
      // optional uint64 committedTxId = 4;
      private long committedTxId_ ;
      public boolean hasCommittedTxId() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      public long getCommittedTxId() {
        return committedTxId_;
      }
      public Builder setCommittedTxId(long value) {
        bitField0_ |= 0x00000008;
        committedTxId_ = value;
        onChanged();
        return this;
      }
      public Builder clearCommittedTxId() {
        bitField0_ = (bitField0_ & ~0x00000008);
        committedTxId_ = 0L;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:RequestInfoProto)
    }
    
    static {
      defaultInstance = new RequestInfoProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:RequestInfoProto)
  }
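  
  // Editor's note, not generated by protoc: a sketch of composing a
  // RequestInfoProto, whose required journalId field nests a JournalIdProto.
  // The setJournalId(Builder) overload (above) builds the sub-message for the
  // caller; committedTxId is optional and is simply left unset here. The
  // identifier and numeric values are illustrative placeholders.
  private static RequestInfoProto exampleRequestInfo() {
    return RequestInfoProto.newBuilder()
        .setJournalId(JournalIdProto.newBuilder()
            .setIdentifier("my-journal"))
        .setEpoch(1L)
        .setIpcSerialNumber(42L)
        .build();  // throws if any required field is missing
  }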
  
  public interface SegmentStateProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required uint64 startTxId = 1;
    boolean hasStartTxId();
    long getStartTxId();
    
    // required uint64 endTxId = 2;
    boolean hasEndTxId();
    long getEndTxId();
    
    // required bool isInProgress = 3;
    boolean hasIsInProgress();
    boolean getIsInProgress();
  }
  public static final class SegmentStateProto extends
      com.google.protobuf.GeneratedMessage
      implements SegmentStateProtoOrBuilder {
    // Use SegmentStateProto.newBuilder() to construct.
    private SegmentStateProto(Builder builder) {
      super(builder);
    }
    private SegmentStateProto(boolean noInit) {}
    
    private static final SegmentStateProto defaultInstance;
    public static SegmentStateProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public SegmentStateProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_SegmentStateProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_SegmentStateProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required uint64 startTxId = 1;
    public static final int STARTTXID_FIELD_NUMBER = 1;
    private long startTxId_;
    public boolean hasStartTxId() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public long getStartTxId() {
      return startTxId_;
    }
    
    // required uint64 endTxId = 2;
    public static final int ENDTXID_FIELD_NUMBER = 2;
    private long endTxId_;
    public boolean hasEndTxId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public long getEndTxId() {
      return endTxId_;
    }
    
    // required bool isInProgress = 3;
    public static final int ISINPROGRESS_FIELD_NUMBER = 3;
    private boolean isInProgress_;
    public boolean hasIsInProgress() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    public boolean getIsInProgress() {
      return isInProgress_;
    }
    
    private void initFields() {
      startTxId_ = 0L;
      endTxId_ = 0L;
      isInProgress_ = false;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasStartTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEndTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIsInProgress()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeUInt64(1, startTxId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, endTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeBool(3, isInProgress_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(1, startTxId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, endTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBoolSize(3, isInProgress_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) obj;
      
      boolean result = true;
      result = result && (hasStartTxId() == other.hasStartTxId());
      if (hasStartTxId()) {
        result = result && (getStartTxId()
            == other.getStartTxId());
      }
      result = result && (hasEndTxId() == other.hasEndTxId());
      if (hasEndTxId()) {
        result = result && (getEndTxId()
            == other.getEndTxId());
      }
      result = result && (hasIsInProgress() == other.hasIsInProgress());
      if (hasIsInProgress()) {
        result = result && (getIsInProgress()
            == other.getIsInProgress());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasStartTxId()) {
        hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getStartTxId());
      }
      if (hasEndTxId()) {
        hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getEndTxId());
      }
      if (hasIsInProgress()) {
        hash = (37 * hash) + ISINPROGRESS_FIELD_NUMBER;
        hash = (53 * hash) + hashBoolean(getIsInProgress());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_SegmentStateProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_SegmentStateProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        startTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000001);
        endTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        isInProgress_ = false;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.startTxId_ = startTxId_;
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.endTxId_ = endTxId_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.isInProgress_ = isInProgress_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) return this;
        if (other.hasStartTxId()) {
          setStartTxId(other.getStartTxId());
        }
        if (other.hasEndTxId()) {
          setEndTxId(other.getEndTxId());
        }
        if (other.hasIsInProgress()) {
          setIsInProgress(other.getIsInProgress());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasStartTxId()) {
          return false;
        }
        if (!hasEndTxId()) {
          return false;
        }
        if (!hasIsInProgress()) {
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 8: {
              bitField0_ |= 0x00000001;
              startTxId_ = input.readUInt64();
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              endTxId_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              isInProgress_ = input.readBool();
              break;
            }
          }
        }
      }
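      // Note (editorial, not generated): the case labels above are protobuf
      // wire-format tags, computed as (field_number << 3) | wire_type. With
      // wire type 0 (varint) for all three fields, startTxId (field 1) maps
      // to tag 8, endTxId (field 2) to tag 16, and isInProgress (field 3)
      // to tag 24; a tag of 0 signals end of input and terminates the loop.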
      
      private int bitField0_;
      
      // required uint64 startTxId = 1;
      private long startTxId_ ;
      public boolean hasStartTxId() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public long getStartTxId() {
        return startTxId_;
      }
      public Builder setStartTxId(long value) {
        bitField0_ |= 0x00000001;
        startTxId_ = value;
        onChanged();
        return this;
      }
      public Builder clearStartTxId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        startTxId_ = 0L;
        onChanged();
        return this;
      }
      
      // required uint64 endTxId = 2;
      private long endTxId_ ;
      public boolean hasEndTxId() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public long getEndTxId() {
        return endTxId_;
      }
      public Builder setEndTxId(long value) {
        bitField0_ |= 0x00000002;
        endTxId_ = value;
        onChanged();
        return this;
      }
      public Builder clearEndTxId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        endTxId_ = 0L;
        onChanged();
        return this;
      }
      
      // required bool isInProgress = 3;
      private boolean isInProgress_ ;
      public boolean hasIsInProgress() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      public boolean getIsInProgress() {
        return isInProgress_;
      }
      public Builder setIsInProgress(boolean value) {
        bitField0_ |= 0x00000004;
        isInProgress_ = value;
        onChanged();
        return this;
      }
      public Builder clearIsInProgress() {
        bitField0_ = (bitField0_ & ~0x00000004);
        isInProgress_ = false;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:SegmentStateProto)
    }
    
    static {
      defaultInstance = new SegmentStateProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:SegmentStateProto)
  }
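  // Illustrative sketch (editorial, not part of the generated file): building
  // and serializing a SegmentStateProto with the Builder above. All three
  // fields are required, so build() throws UninitializedMessageException if
  // any setter is skipped; toByteString() is inherited from MessageLite.
  //
  //   SegmentStateProto segment = SegmentStateProto.newBuilder()
  //       .setStartTxId(1L)
  //       .setEndTxId(100L)
  //       .setIsInProgress(false)
  //       .build();
  //   com.google.protobuf.ByteString bytes = segment.toByteString();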
  
  public interface PersistedRecoveryPaxosDataOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .SegmentStateProto segmentState = 1;
    boolean hasSegmentState();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();
    
    // required uint64 acceptedInEpoch = 2;
    boolean hasAcceptedInEpoch();
    long getAcceptedInEpoch();
  }
  public static final class PersistedRecoveryPaxosData extends
      com.google.protobuf.GeneratedMessage
      implements PersistedRecoveryPaxosDataOrBuilder {
    // Use PersistedRecoveryPaxosData.newBuilder() to construct.
    private PersistedRecoveryPaxosData(Builder builder) {
      super(builder);
    }
    private PersistedRecoveryPaxosData(boolean noInit) {}
    
    private static final PersistedRecoveryPaxosData defaultInstance;
    public static PersistedRecoveryPaxosData getDefaultInstance() {
      return defaultInstance;
    }
    
    public PersistedRecoveryPaxosData getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PersistedRecoveryPaxosData_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PersistedRecoveryPaxosData_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .SegmentStateProto segmentState = 1;
    public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
    public boolean hasSegmentState() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
      return segmentState_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
      return segmentState_;
    }
    
    // required uint64 acceptedInEpoch = 2;
    public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
    private long acceptedInEpoch_;
    public boolean hasAcceptedInEpoch() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public long getAcceptedInEpoch() {
      return acceptedInEpoch_;
    }
    
    private void initFields() {
      segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      acceptedInEpoch_ = 0L;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasSegmentState()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasAcceptedInEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getSegmentState().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, segmentState_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, acceptedInEpoch_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, segmentState_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, acceptedInEpoch_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) obj;
      
      boolean result = true;
      result = result && (hasSegmentState() == other.hasSegmentState());
      if (hasSegmentState()) {
        result = result && getSegmentState()
            .equals(other.getSegmentState());
      }
      result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
      if (hasAcceptedInEpoch()) {
        result = result && (getAcceptedInEpoch()
            == other.getAcceptedInEpoch());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasSegmentState()) {
        hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
        hash = (53 * hash) + getSegmentState().hashCode();
      }
      if (hasAcceptedInEpoch()) {
        hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getAcceptedInEpoch());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
        implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosDataOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PersistedRecoveryPaxosData_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PersistedRecoveryPaxosData_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getSegmentStateFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (segmentStateBuilder_ == null) {
          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
        } else {
          segmentStateBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        acceptedInEpoch_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (segmentStateBuilder_ == null) {
          result.segmentState_ = segmentState_;
        } else {
          result.segmentState_ = segmentStateBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.acceptedInEpoch_ = acceptedInEpoch_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance()) return this;
        if (other.hasSegmentState()) {
          mergeSegmentState(other.getSegmentState());
        }
        if (other.hasAcceptedInEpoch()) {
          setAcceptedInEpoch(other.getAcceptedInEpoch());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasSegmentState()) {
          return false;
        }
        if (!hasAcceptedInEpoch()) {
          return false;
        }
        if (!getSegmentState().isInitialized()) {
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder();
              if (hasSegmentState()) {
                subBuilder.mergeFrom(getSegmentState());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setSegmentState(subBuilder.buildPartial());
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              acceptedInEpoch_ = input.readUInt64();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required .SegmentStateProto segmentState = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
      public boolean hasSegmentState() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
        if (segmentStateBuilder_ == null) {
          return segmentState_;
        } else {
          return segmentStateBuilder_.getMessage();
        }
      }
      public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (segmentStateBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          segmentState_ = value;
          onChanged();
        } else {
          segmentStateBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setSegmentState(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
        if (segmentStateBuilder_ == null) {
          segmentState_ = builderForValue.build();
          onChanged();
        } else {
          segmentStateBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (segmentStateBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
            segmentState_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
          } else {
            segmentState_ = value;
          }
          onChanged();
        } else {
          segmentStateBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearSegmentState() {
        if (segmentStateBuilder_ == null) {
          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
          onChanged();
        } else {
          segmentStateBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getSegmentStateFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
        if (segmentStateBuilder_ != null) {
          return segmentStateBuilder_.getMessageOrBuilder();
        } else {
          return segmentState_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
          getSegmentStateFieldBuilder() {
        if (segmentStateBuilder_ == null) {
          segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
                  segmentState_,
                  getParentForChildren(),
                  isClean());
          segmentState_ = null;
        }
        return segmentStateBuilder_;
      }
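      // Note (editorial, not generated): the nested field keeps two
      // representations. Until getSegmentStateFieldBuilder() is first called,
      // the message is held directly in segmentState_; afterwards ownership
      // moves into segmentStateBuilder_ (and segmentState_ is nulled), which
      // is why every accessor above branches on whether segmentStateBuilder_
      // is null.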
      
      // required uint64 acceptedInEpoch = 2;
      private long acceptedInEpoch_ ;
      public boolean hasAcceptedInEpoch() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public long getAcceptedInEpoch() {
        return acceptedInEpoch_;
      }
      public Builder setAcceptedInEpoch(long value) {
        bitField0_ |= 0x00000002;
        acceptedInEpoch_ = value;
        onChanged();
        return this;
      }
      public Builder clearAcceptedInEpoch() {
        bitField0_ = (bitField0_ & ~0x00000002);
        acceptedInEpoch_ = 0L;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:PersistedRecoveryPaxosData)
    }
    
    static {
      defaultInstance = new PersistedRecoveryPaxosData(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:PersistedRecoveryPaxosData)
  }
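  // Illustrative sketch (editorial, not part of the generated file):
  // composing the nested required message. setSegmentState(...) replaces the
  // current value outright, while mergeSegmentState(...) field-merges into a
  // non-default existing value, matching the generated logic above.
  //
  //   PersistedRecoveryPaxosData data = PersistedRecoveryPaxosData.newBuilder()
  //       .setSegmentState(SegmentStateProto.newBuilder()
  //           .setStartTxId(1L).setEndTxId(100L).setIsInProgress(false))
  //       .setAcceptedInEpoch(7L)
  //       .build();
  //   PersistedRecoveryPaxosData parsed =
  //       PersistedRecoveryPaxosData.parseFrom(data.toByteString());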
  
  public interface JournalRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .RequestInfoProto reqInfo = 1;
    boolean hasReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
    
    // required uint64 firstTxnId = 2;
    boolean hasFirstTxnId();
    long getFirstTxnId();
    
    // required uint32 numTxns = 3;
    boolean hasNumTxns();
    int getNumTxns();
    
    // required bytes records = 4;
    boolean hasRecords();
    com.google.protobuf.ByteString getRecords();
    
    // required uint64 segmentTxnId = 5;
    boolean hasSegmentTxnId();
    long getSegmentTxnId();
  }
  public static final class JournalRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements JournalRequestProtoOrBuilder {
    // Use JournalRequestProto.newBuilder() to construct.
    private JournalRequestProto(Builder builder) {
      super(builder);
    }
    private JournalRequestProto(boolean noInit) {}
    
    private static final JournalRequestProto defaultInstance;
    public static JournalRequestProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public JournalRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_JournalRequestProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_JournalRequestProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }
    
    // required uint64 firstTxnId = 2;
    public static final int FIRSTTXNID_FIELD_NUMBER = 2;
    private long firstTxnId_;
    public boolean hasFirstTxnId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public long getFirstTxnId() {
      return firstTxnId_;
    }
    
    // required uint32 numTxns = 3;
    public static final int NUMTXNS_FIELD_NUMBER = 3;
    private int numTxns_;
    public boolean hasNumTxns() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    public int getNumTxns() {
      return numTxns_;
    }
    
    // required bytes records = 4;
    public static final int RECORDS_FIELD_NUMBER = 4;
    private com.google.protobuf.ByteString records_;
    public boolean hasRecords() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    public com.google.protobuf.ByteString getRecords() {
      return records_;
    }
    
    // required uint64 segmentTxnId = 5;
    public static final int SEGMENTTXNID_FIELD_NUMBER = 5;
    private long segmentTxnId_;
    public boolean hasSegmentTxnId() {
      return ((bitField0_ & 0x00000010) == 0x00000010);
    }
    public long getSegmentTxnId() {
      return segmentTxnId_;
    }
    
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      firstTxnId_ = 0L;
      numTxns_ = 0;
      records_ = com.google.protobuf.ByteString.EMPTY;
      segmentTxnId_ = 0L;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasFirstTxnId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNumTxns()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasRecords()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSegmentTxnId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, firstTxnId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt32(3, numTxns_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeBytes(4, records_);
      }
      if (((bitField0_ & 0x00000010) == 0x00000010)) {
        output.writeUInt64(5, segmentTxnId_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, firstTxnId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(3, numTxns_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(4, records_);
      }
      if (((bitField0_ & 0x00000010) == 0x00000010)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(5, segmentTxnId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
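    // Note (editorial, not generated): getSerializedSize() caches its result
    // in memoizedSerializedSize, and writeTo(...) invokes it first, so the
    // size computation runs at most once per immutable message instance.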
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) obj;
      
      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasFirstTxnId() == other.hasFirstTxnId());
      if (hasFirstTxnId()) {
        result = result && (getFirstTxnId()
            == other.getFirstTxnId());
      }
      result = result && (hasNumTxns() == other.hasNumTxns());
      if (hasNumTxns()) {
        result = result && (getNumTxns()
            == other.getNumTxns());
      }
      result = result && (hasRecords() == other.hasRecords());
      if (hasRecords()) {
        result = result && getRecords()
            .equals(other.getRecords());
      }
      result = result && (hasSegmentTxnId() == other.hasSegmentTxnId());
      if (hasSegmentTxnId()) {
        result = result && (getSegmentTxnId()
            == other.getSegmentTxnId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasFirstTxnId()) {
        hash = (37 * hash) + FIRSTTXNID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getFirstTxnId());
      }
      if (hasNumTxns()) {
        hash = (37 * hash) + NUMTXNS_FIELD_NUMBER;
        hash = (53 * hash) + getNumTxns();
      }
      if (hasRecords()) {
        hash = (37 * hash) + RECORDS_FIELD_NUMBER;
        hash = (53 * hash) + getRecords().hashCode();
      }
      if (hasSegmentTxnId()) {
        hash = (37 * hash) + SEGMENTTXNID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getSegmentTxnId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
        implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_JournalRequestProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_JournalRequestProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        firstTxnId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        numTxns_ = 0;
        bitField0_ = (bitField0_ & ~0x00000004);
        records_ = com.google.protobuf.ByteString.EMPTY;
        bitField0_ = (bitField0_ & ~0x00000008);
        segmentTxnId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000010);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.firstTxnId_ = firstTxnId_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.numTxns_ = numTxns_;
        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
          to_bitField0_ |= 0x00000008;
        }
        result.records_ = records_;
        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
          to_bitField0_ |= 0x00000010;
        }
        result.segmentTxnId_ = segmentTxnId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        if (other.hasFirstTxnId()) {
          setFirstTxnId(other.getFirstTxnId());
        }
        if (other.hasNumTxns()) {
          setNumTxns(other.getNumTxns());
        }
        if (other.hasRecords()) {
          setRecords(other.getRecords());
        }
        if (other.hasSegmentTxnId()) {
          setSegmentTxnId(other.getSegmentTxnId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          return false;
        }
        if (!hasFirstTxnId()) {
          return false;
        }
        if (!hasNumTxns()) {
          return false;
        }
        if (!hasRecords()) {
          return false;
        }
        if (!hasSegmentTxnId()) {
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
              if (hasReqInfo()) {
                subBuilder.mergeFrom(getReqInfo());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setReqInfo(subBuilder.buildPartial());
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              firstTxnId_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              numTxns_ = input.readUInt32();
              break;
            }
            case 34: {
              bitField0_ |= 0x00000008;
              records_ = input.readBytes();
              break;
            }
            case 40: {
              bitField0_ |= 0x00000010;
              segmentTxnId_ = input.readUInt64();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required .RequestInfoProto reqInfo = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }
      
      // required uint64 firstTxnId = 2;
      private long firstTxnId_ ;
      public boolean hasFirstTxnId() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public long getFirstTxnId() {
        return firstTxnId_;
      }
      public Builder setFirstTxnId(long value) {
        bitField0_ |= 0x00000002;
        firstTxnId_ = value;
        onChanged();
        return this;
      }
      public Builder clearFirstTxnId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        firstTxnId_ = 0L;
        onChanged();
        return this;
      }
      
      // required uint32 numTxns = 3;
      private int numTxns_ ;
      public boolean hasNumTxns() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      public int getNumTxns() {
        return numTxns_;
      }
      public Builder setNumTxns(int value) {
        bitField0_ |= 0x00000004;
        numTxns_ = value;
        onChanged();
        return this;
      }
      public Builder clearNumTxns() {
        bitField0_ = (bitField0_ & ~0x00000004);
        numTxns_ = 0;
        onChanged();
        return this;
      }
      
      // required bytes records = 4;
      private com.google.protobuf.ByteString records_ = com.google.protobuf.ByteString.EMPTY;
      public boolean hasRecords() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      public com.google.protobuf.ByteString getRecords() {
        return records_;
      }
      public Builder setRecords(com.google.protobuf.ByteString value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000008;
        records_ = value;
        onChanged();
        return this;
      }
      public Builder clearRecords() {
        bitField0_ = (bitField0_ & ~0x00000008);
        records_ = getDefaultInstance().getRecords();
        onChanged();
        return this;
      }
      
      // required uint64 segmentTxnId = 5;
      private long segmentTxnId_ ;
      public boolean hasSegmentTxnId() {
        return ((bitField0_ & 0x00000010) == 0x00000010);
      }
      public long getSegmentTxnId() {
        return segmentTxnId_;
      }
      public Builder setSegmentTxnId(long value) {
        bitField0_ |= 0x00000010;
        segmentTxnId_ = value;
        onChanged();
        return this;
      }
      public Builder clearSegmentTxnId() {
        bitField0_ = (bitField0_ & ~0x00000010);
        segmentTxnId_ = 0L;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:JournalRequestProto)
    }
    
    static {
      defaultInstance = new JournalRequestProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:JournalRequestProto)
  }
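  
  // Illustrative usage sketch (hand-written, not part of the generated file):
  // building and serializing a JournalRequestProto. Assumes `reqInfo` is an
  // already-populated RequestInfoProto and `editBytes` holds the raw journal
  // records; build() throws if any required field is unset.
  //
  //   JournalRequestProto req = JournalRequestProto.newBuilder()
  //       .setReqInfo(reqInfo)                          // required message
  //       .setFirstTxnId(101L)                          // required uint64
  //       .setNumTxns(2)                                // required uint32
  //       .setRecords(com.google.protobuf.ByteString.copyFrom(editBytes))
  //       .setSegmentTxnId(100L)                        // required uint64
  //       .build();
  //   byte[] wire = req.toByteArray();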
  
  public interface JournalResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
  public static final class JournalResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements JournalResponseProtoOrBuilder {
    // Use JournalResponseProto.newBuilder() to construct.
    private JournalResponseProto(Builder builder) {
      super(builder);
    }
    private JournalResponseProto(boolean noInit) {}
    
    private static final JournalResponseProto defaultInstance;
    public static JournalResponseProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public JournalResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_JournalResponseProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_JournalResponseProto_fieldAccessorTable;
    }
    
    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) obj;
      
      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_JournalResponseProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_JournalResponseProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto(this);
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
          }
        }
      }
      
      
      // @@protoc_insertion_point(builder_scope:JournalResponseProto)
    }
    
    static {
      defaultInstance = new JournalResponseProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:JournalResponseProto)
  }
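  
  // Hand-written sketch: JournalResponseProto declares no fields, so a round
  // trip through the wire format exercises only the unknown-field machinery,
  // and equals() reduces to comparing unknown fields.
  //
  //   JournalResponseProto resp = JournalResponseProto.newBuilder().build();
  //   JournalResponseProto parsed =
  //       JournalResponseProto.parseFrom(resp.toByteString());
  //   boolean same = parsed.equals(resp);  // true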
  
  public interface HeartbeatRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .RequestInfoProto reqInfo = 1;
    boolean hasReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
  }
  public static final class HeartbeatRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements HeartbeatRequestProtoOrBuilder {
    // Use HeartbeatRequestProto.newBuilder() to construct.
    private HeartbeatRequestProto(Builder builder) {
      super(builder);
    }
    private HeartbeatRequestProto(boolean noInit) {}
    
    private static final HeartbeatRequestProto defaultInstance;
    public static HeartbeatRequestProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public HeartbeatRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_HeartbeatRequestProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_HeartbeatRequestProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }
    
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
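    
    // Hand-written note: getSerializedSize() memoizes its result in
    // memoizedSerializedSize, and writeTo() calls it first so the cached size
    // of the nested reqInfo message is already available when writeMessage()
    // emits the field's length prefix.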
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) obj;
      
      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_HeartbeatRequestProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_HeartbeatRequestProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
              if (hasReqInfo()) {
                subBuilder.mergeFrom(getReqInfo());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setReqInfo(subBuilder.buildPartial());
              break;
            }
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
          }
        }
      }
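      
      // Hand-written note: in the parse loop above, a tag of 0 signals end of
      // input, and tag 10 selects the reqInfo field:
      // tag = (field_number << 3) | wire_type = (1 << 3) | 2 = 10,
      // where wire type 2 means length-delimited.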
      
      private int bitField0_;
      
      // required .RequestInfoProto reqInfo = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }
      
      // @@protoc_insertion_point(builder_scope:HeartbeatRequestProto)
    }
    
    static {
      defaultInstance = new HeartbeatRequestProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:HeartbeatRequestProto)
  }
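  
  // Hand-written sketch: required-field checking on the heartbeat request.
  // Assumes `reqInfo` is an initialized RequestInfoProto.
  //
  //   HeartbeatRequestProto.Builder b = HeartbeatRequestProto.newBuilder();
  //   b.isInitialized();   // false: required reqInfo is unset
  //   b.buildPartial();    // succeeds, but yields an uninitialized message
  //   b.build();           // throws UninitializedMessageException
  //   HeartbeatRequestProto ok = b.setReqInfo(reqInfo).build();  // succeeds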
  
  public interface HeartbeatResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
  public static final class HeartbeatResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements HeartbeatResponseProtoOrBuilder {
    // Use HeartbeatResponseProto.newBuilder() to construct.
    private HeartbeatResponseProto(Builder builder) {
      super(builder);
    }
    private HeartbeatResponseProto(boolean noInit) {}
    
    private static final HeartbeatResponseProto defaultInstance;
    public static HeartbeatResponseProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public HeartbeatResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_HeartbeatResponseProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_HeartbeatResponseProto_fieldAccessorTable;
    }
    
    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) obj;
      
      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_HeartbeatResponseProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_HeartbeatResponseProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto(this);
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
          }
        }
      }
      
      
      // @@protoc_insertion_point(builder_scope:HeartbeatResponseProto)
    }
    
    static {
      defaultInstance = new HeartbeatResponseProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:HeartbeatResponseProto)
  }
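  
  // Hand-written sketch: the parseDelimitedFrom helpers return null on a clean
  // end of stream, so a sequence of length-prefixed messages can be drained
  // from an InputStream `in` like this:
  //
  //   HeartbeatResponseProto msg;
  //   while ((msg = HeartbeatResponseProto.parseDelimitedFrom(in)) != null) {
  //     // handle msg
  //   }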
  
  public interface StartLogSegmentRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .RequestInfoProto reqInfo = 1;
    boolean hasReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
    
    // required uint64 txid = 2;
    boolean hasTxid();
    long getTxid();
  }
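  
  // Hand-written sketch: a StartLogSegmentRequestProto couples the caller's
  // request metadata with the txid at which the new log segment begins.
  // Assumes `reqInfo` is an initialized RequestInfoProto.
  //
  //   StartLogSegmentRequestProto req = StartLogSegmentRequestProto.newBuilder()
  //       .setReqInfo(reqInfo)  // required message
  //       .setTxid(42L)         // required uint64: first txid of the segment
  //       .build();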
  public static final class StartLogSegmentRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements StartLogSegmentRequestProtoOrBuilder {
    // Use StartLogSegmentRequestProto.newBuilder() to construct.
    private StartLogSegmentRequestProto(Builder builder) {
      super(builder);
    }
    private StartLogSegmentRequestProto(boolean noInit) {}
    
    private static final StartLogSegmentRequestProto defaultInstance;
    public static StartLogSegmentRequestProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public StartLogSegmentRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_StartLogSegmentRequestProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_StartLogSegmentRequestProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }
    
    // required uint64 txid = 2;
    public static final int TXID_FIELD_NUMBER = 2;
    private long txid_;
    public boolean hasTxid() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public long getTxid() {
      return txid_;
    }
    
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      txid_ = 0L;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasTxid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, txid_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, txid_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) obj;
      
      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasTxid() == other.hasTxid());
      if (hasTxid()) {
        result = result && (getTxid()
            == other.getTxid());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasTxid()) {
        hash = (37 * hash) + TXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getTxid());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
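    
    // Hand-written note: hashLong folds the 64-bit txid into 32 bits the same
    // way Long.hashCode does: hashLong(n) == (int) (n ^ (n >>> 32)).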
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_StartLogSegmentRequestProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_StartLogSegmentRequestProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        txid_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.txid_ = txid_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        if (other.hasTxid()) {
          setTxid(other.getTxid());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          // required field reqInfo is not set
          return false;
        }
        if (!hasTxid()) {
          // required field txid is not set
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          // reqInfo is set but missing one of its own required fields
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
              if (hasReqInfo()) {
                subBuilder.mergeFrom(getReqInfo());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setReqInfo(subBuilder.buildPartial());
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              txid_ = input.readUInt64();
              break;
            }
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
          }
        }
      }
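      
      // Editorial note (not generated): the case labels in the switch above
      // are raw protobuf wire tags, computed as (field_number << 3) | wire_type:
      //   case 10 = (1 << 3) | 2 -> field 1 (reqInfo), length-delimited message
      //   case 16 = (2 << 3) | 0 -> field 2 (txid), varint
      // A tag of 0 means end of input, which terminates the parse loop.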
      
      private int bitField0_;
      
      // required .RequestInfoProto reqInfo = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }
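      
      // Editorial note (not generated): reqInfo_ and reqInfoBuilder_ are
      // mutually exclusive representations of the same field. The
      // SingleFieldBuilder is created lazily here on first use (or eagerly
      // when alwaysUseFieldBuilders is set); once it exists it owns the
      // value, so reqInfo_ is nulled out to keep a single source of truth.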
      
      // required uint64 txid = 2;
      private long txid_ ;
      public boolean hasTxid() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public long getTxid() {
        return txid_;
      }
      public Builder setTxid(long value) {
        bitField0_ |= 0x00000002;
        txid_ = value;
        onChanged();
        return this;
      }
      public Builder clearTxid() {
        bitField0_ = (bitField0_ & ~0x00000002);
        txid_ = 0L;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:StartLogSegmentRequestProto)
    }
    
    static {
      defaultInstance = new StartLogSegmentRequestProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:StartLogSegmentRequestProto)
  }
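  
  // Editorial usage sketch (not part of the generated file): building and
  // round-tripping a StartLogSegmentRequestProto. The RequestInfoProto field
  // names used here (journalId, epoch, ipcSerialNumber) are assumed from
  // QJournalProtocol.proto and should be checked against the descriptor.
  //
  //   StartLogSegmentRequestProto req = StartLogSegmentRequestProto.newBuilder()
  //       .setReqInfo(RequestInfoProto.newBuilder()
  //           .setJournalId(JournalIdProto.newBuilder().setIdentifier("edits"))
  //           .setEpoch(1L)
  //           .setIpcSerialNumber(0L))
  //       .setTxid(1L)
  //       .build();   // build() throws if any required field is missing
  //   byte[] bytes = req.toByteArray();
  //   StartLogSegmentRequestProto parsed =
  //       StartLogSegmentRequestProto.parseFrom(bytes);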
  
  public interface StartLogSegmentResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
  public static final class StartLogSegmentResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements StartLogSegmentResponseProtoOrBuilder {
    // Use StartLogSegmentResponseProto.newBuilder() to construct.
    private StartLogSegmentResponseProto(Builder builder) {
      super(builder);
    }
    private StartLogSegmentResponseProto(boolean noInit) {}
    
    private static final StartLogSegmentResponseProto defaultInstance;
    public static StartLogSegmentResponseProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public StartLogSegmentResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_StartLogSegmentResponseProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_StartLogSegmentResponseProto_fieldAccessorTable;
    }
    
    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) obj;
      
      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_StartLogSegmentResponseProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_StartLogSegmentResponseProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto(this);
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
          }
        }
      }
      
      
      // @@protoc_insertion_point(builder_scope:StartLogSegmentResponseProto)
    }
    
    static {
      defaultInstance = new StartLogSegmentResponseProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:StartLogSegmentResponseProto)
  }
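  
  // Editorial note (not generated): StartLogSegmentResponseProto declares no
  // fields; it is an empty acknowledgement paired with
  // StartLogSegmentRequestProto by the startLogSegment rpc declared in
  // QJournalProtocol.proto. Because unknown fields are still preserved and
  // serialized, fields can later be added to the response without breaking
  // existing callers.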
  
  public interface FinalizeLogSegmentRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .RequestInfoProto reqInfo = 1;
    boolean hasReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
    
    // required uint64 startTxId = 2;
    boolean hasStartTxId();
    long getStartTxId();
    
    // required uint64 endTxId = 3;
    boolean hasEndTxId();
    long getEndTxId();
  }
  public static final class FinalizeLogSegmentRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements FinalizeLogSegmentRequestProtoOrBuilder {
    // Use FinalizeLogSegmentRequestProto.newBuilder() to construct.
    private FinalizeLogSegmentRequestProto(Builder builder) {
      super(builder);
    }
    private FinalizeLogSegmentRequestProto(boolean noInit) {}
    
    private static final FinalizeLogSegmentRequestProto defaultInstance;
    public static FinalizeLogSegmentRequestProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FinalizeLogSegmentRequestProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FinalizeLogSegmentRequestProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }
    
    // required uint64 startTxId = 2;
    public static final int STARTTXID_FIELD_NUMBER = 2;
    private long startTxId_;
    public boolean hasStartTxId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public long getStartTxId() {
      return startTxId_;
    }
    
    // required uint64 endTxId = 3;
    public static final int ENDTXID_FIELD_NUMBER = 3;
    private long endTxId_;
    public boolean hasEndTxId() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    public long getEndTxId() {
      return endTxId_;
    }
    
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      startTxId_ = 0L;
      endTxId_ = 0L;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasStartTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEndTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, startTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, endTxId_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, startTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(3, endTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) obj;
      
      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasStartTxId() == other.hasStartTxId());
      if (hasStartTxId()) {
        result = result && (getStartTxId()
            == other.getStartTxId());
      }
      result = result && (hasEndTxId() == other.hasEndTxId());
      if (hasEndTxId()) {
        result = result && (getEndTxId()
            == other.getEndTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasStartTxId()) {
        hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getStartTxId());
      }
      if (hasEndTxId()) {
        hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getEndTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FinalizeLogSegmentRequestProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FinalizeLogSegmentRequestProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        startTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        endTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.startTxId_ = startTxId_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.endTxId_ = endTxId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        if (other.hasStartTxId()) {
          setStartTxId(other.getStartTxId());
        }
        if (other.hasEndTxId()) {
          setEndTxId(other.getEndTxId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          // required field reqInfo is not set
          return false;
        }
        if (!hasStartTxId()) {
          // required field startTxId is not set
          return false;
        }
        if (!hasEndTxId()) {
          // required field endTxId is not set
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          // reqInfo is set but missing one of its own required fields
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
              if (hasReqInfo()) {
                subBuilder.mergeFrom(getReqInfo());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setReqInfo(subBuilder.buildPartial());
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              startTxId_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              endTxId_ = input.readUInt64();
              break;
            }
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
          }
        }
      }
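      
      // Editorial note (not generated): same wire-tag scheme as in
      // StartLogSegmentRequestProto.Builder.mergeFrom above; the extra label
      //   case 24 = (3 << 3) | 0 -> field 3 (endTxId), varint
      // covers this message's third required field.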
      
      private int bitField0_;
      
      // required .RequestInfoProto reqInfo = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }
      
      // required uint64 startTxId = 2;
      private long startTxId_ ;
      public boolean hasStartTxId() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public long getStartTxId() {
        return startTxId_;
      }
      public Builder setStartTxId(long value) {
        bitField0_ |= 0x00000002;
        startTxId_ = value;
        onChanged();
        return this;
      }
      public Builder clearStartTxId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        startTxId_ = 0L;
        onChanged();
        return this;
      }
      
      // required uint64 endTxId = 3;
      private long endTxId_ ;
      public boolean hasEndTxId() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      public long getEndTxId() {
        return endTxId_;
      }
      public Builder setEndTxId(long value) {
        bitField0_ |= 0x00000004;
        endTxId_ = value;
        onChanged();
        return this;
      }
      public Builder clearEndTxId() {
        bitField0_ = (bitField0_ & ~0x00000004);
        endTxId_ = 0L;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:FinalizeLogSegmentRequestProto)
    }
    
    static {
      defaultInstance = new FinalizeLogSegmentRequestProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:FinalizeLogSegmentRequestProto)
  }
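  
  // Editorial usage sketch (not part of the generated file): finalizing a
  // previously started segment. All three fields are required, so build()
  // throws until reqInfo, startTxId and endTxId are all set; makeRequestInfo()
  // is an assumed caller-side helper, not part of this file.
  //
  //   FinalizeLogSegmentRequestProto req = FinalizeLogSegmentRequestProto.newBuilder()
  //       .setReqInfo(makeRequestInfo())
  //       .setStartTxId(1L)
  //       .setEndTxId(100L)
  //       .build();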
  
  public interface FinalizeLogSegmentResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
  public static final class FinalizeLogSegmentResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements FinalizeLogSegmentResponseProtoOrBuilder {
    // Use FinalizeLogSegmentResponseProto.newBuilder() to construct.
    private FinalizeLogSegmentResponseProto(Builder builder) {
      super(builder);
    }
    private FinalizeLogSegmentResponseProto(boolean noInit) {}
    
    private static final FinalizeLogSegmentResponseProto defaultInstance;
    public static FinalizeLogSegmentResponseProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FinalizeLogSegmentResponseProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FinalizeLogSegmentResponseProto_fieldAccessorTable;
    }
    
    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) obj;
      
      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FinalizeLogSegmentResponseProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FinalizeLogSegmentResponseProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto(this);
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
          }
        }
      }
      
      
      // @@protoc_insertion_point(builder_scope:FinalizeLogSegmentResponseProto)
    }
    
    static {
      defaultInstance = new FinalizeLogSegmentResponseProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:FinalizeLogSegmentResponseProto)
  }
  
  public interface PurgeLogsRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .RequestInfoProto reqInfo = 1;
    boolean hasReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
    
    // required uint64 minTxIdToKeep = 2;
    boolean hasMinTxIdToKeep();
    long getMinTxIdToKeep();
  }
  public static final class PurgeLogsRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements PurgeLogsRequestProtoOrBuilder {
    // Use PurgeLogsRequestProto.newBuilder() to construct.
    private PurgeLogsRequestProto(Builder builder) {
      super(builder);
    }
    private PurgeLogsRequestProto(boolean noInit) {}
    
    private static final PurgeLogsRequestProto defaultInstance;
    public static PurgeLogsRequestProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public PurgeLogsRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PurgeLogsRequestProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PurgeLogsRequestProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }
    
    // required uint64 minTxIdToKeep = 2;
    public static final int MINTXIDTOKEEP_FIELD_NUMBER = 2;
    private long minTxIdToKeep_;
    public boolean hasMinTxIdToKeep() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public long getMinTxIdToKeep() {
      return minTxIdToKeep_;
    }
    
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      minTxIdToKeep_ = 0L;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasMinTxIdToKeep()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, minTxIdToKeep_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, minTxIdToKeep_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) obj;
      
      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasMinTxIdToKeep() == other.hasMinTxIdToKeep());
      if (hasMinTxIdToKeep()) {
        result = result && (getMinTxIdToKeep()
            == other.getMinTxIdToKeep());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasMinTxIdToKeep()) {
        hash = (37 * hash) + MINTXIDTOKEEP_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getMinTxIdToKeep());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PurgeLogsRequestProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PurgeLogsRequestProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        minTxIdToKeep_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.minTxIdToKeep_ = minTxIdToKeep_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        if (other.hasMinTxIdToKeep()) {
          setMinTxIdToKeep(other.getMinTxIdToKeep());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          return false;
        }
        if (!hasMinTxIdToKeep()) {
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          return false;
        }
        return true;
      }
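      // NOTE (editorial): in the parse loop below, each case label is a
      // protobuf wire tag, computed as (field_number << 3) | wire_type.
      // Field 1 (reqInfo, length-delimited, wire type 2) yields
      // (1 << 3) | 2 = 10; field 2 (minTxIdToKeep, varint, wire type 0)
      // yields (2 << 3) | 0 = 16. A tag of 0 signals end of input.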
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
              if (hasReqInfo()) {
                subBuilder.mergeFrom(getReqInfo());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setReqInfo(subBuilder.buildPartial());
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              minTxIdToKeep_ = input.readUInt64();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required .RequestInfoProto reqInfo = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }
      
      // required uint64 minTxIdToKeep = 2;
      private long minTxIdToKeep_;
      public boolean hasMinTxIdToKeep() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public long getMinTxIdToKeep() {
        return minTxIdToKeep_;
      }
      public Builder setMinTxIdToKeep(long value) {
        bitField0_ |= 0x00000002;
        minTxIdToKeep_ = value;
        onChanged();
        return this;
      }
      public Builder clearMinTxIdToKeep() {
        bitField0_ = (bitField0_ & ~0x00000002);
        minTxIdToKeep_ = 0L;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:PurgeLogsRequestProto)
    }
    
    static {
      defaultInstance = new PurgeLogsRequestProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:PurgeLogsRequestProto)
  }
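  // Usage sketch (editorial, not generator output): building and
  // round-tripping a purge request. Both fields are required, so build()
  // throws an UninitializedMessageException if either is missing. The
  // sketch assumes a fully-populated RequestInfoProto named "reqInfo" and
  // an arbitrary example txid, so it is illustrative only:
  //
  //   PurgeLogsRequestProto req = PurgeLogsRequestProto.newBuilder()
  //       .setReqInfo(reqInfo)          // assumed prebuilt RequestInfoProto
  //       .setMinTxIdToKeep(12345L)     // arbitrary example txid
  //       .build();
  //   PurgeLogsRequestProto copy =
  //       PurgeLogsRequestProto.parseFrom(req.toByteArray());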
  
  public interface PurgeLogsResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
  public static final class PurgeLogsResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements PurgeLogsResponseProtoOrBuilder {
    // Use PurgeLogsResponseProto.newBuilder() to construct.
    private PurgeLogsResponseProto(Builder builder) {
      super(builder);
    }
    private PurgeLogsResponseProto(boolean noInit) {}
    
    private static final PurgeLogsResponseProto defaultInstance;
    public static PurgeLogsResponseProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public PurgeLogsResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PurgeLogsResponseProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PurgeLogsResponseProto_fieldAccessorTable;
    }
    
    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) obj;
      
      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PurgeLogsResponseProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PurgeLogsResponseProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto(this);
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
          }
        }
      }
      
      
      // @@protoc_insertion_point(builder_scope:PurgeLogsResponseProto)
    }
    
    static {
      defaultInstance = new PurgeLogsResponseProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:PurgeLogsResponseProto)
  }
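  // NOTE (editorial): PurgeLogsResponseProto declares no fields; it is a
  // bare acknowledgement. Every instance is equivalent apart from unknown
  // fields, so the shared default instance can be used directly:
  //
  //   PurgeLogsResponseProto ack = PurgeLogsResponseProto.getDefaultInstance();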
  
  public interface IsFormattedRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .JournalIdProto jid = 1;
    boolean hasJid();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
  }
  public static final class IsFormattedRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements IsFormattedRequestProtoOrBuilder {
    // Use IsFormattedRequestProto.newBuilder() to construct.
    private IsFormattedRequestProto(Builder builder) {
      super(builder);
    }
    private IsFormattedRequestProto(boolean noInit) {}
    
    private static final IsFormattedRequestProto defaultInstance;
    public static IsFormattedRequestProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public IsFormattedRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_IsFormattedRequestProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_IsFormattedRequestProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }
    
    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, jid_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) obj;
      
      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_IsFormattedRequestProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_IsFormattedRequestProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJidFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (jidBuilder_ == null) {
          result.jid_ = jid_;
        } else {
          result.jid_ = jidBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance()) return this;
        if (other.hasJid()) {
          mergeJid(other.getJid());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasJid()) {
          return false;
        }
        if (!getJid().isInitialized()) {
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder();
              if (hasJid()) {
                subBuilder.mergeFrom(getJid());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setJid(subBuilder.buildPartial());
              break;
            }
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required .JournalIdProto jid = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
      public boolean hasJid() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
        if (jidBuilder_ == null) {
          return jid_;
        } else {
          return jidBuilder_.getMessage();
        }
      }
      public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          jid_ = value;
          onChanged();
        } else {
          jidBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setJid(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
        if (jidBuilder_ == null) {
          jid_ = builderForValue.build();
          onChanged();
        } else {
          jidBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
            jid_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
          } else {
            jid_ = value;
          }
          onChanged();
        } else {
          jidBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearJid() {
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
          onChanged();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getJidFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
        if (jidBuilder_ != null) {
          return jidBuilder_.getMessageOrBuilder();
        } else {
          return jid_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
          getJidFieldBuilder() {
        if (jidBuilder_ == null) {
          jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
                  jid_,
                  getParentForChildren(),
                  isClean());
          jid_ = null;
        }
        return jidBuilder_;
      }
      
      // @@protoc_insertion_point(builder_scope:IsFormattedRequestProto)
    }
    
    static {
      defaultInstance = new IsFormattedRequestProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:IsFormattedRequestProto)
  }
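  
  // Illustrative usage of IsFormattedRequestProto (a sketch, not part of the
  // generated output; "myjournal" is a made-up journal identifier):
  //
  //   JournalIdProto jid = JournalIdProto.newBuilder()
  //       .setIdentifier("myjournal")
  //       .build();
  //   IsFormattedRequestProto req = IsFormattedRequestProto.newBuilder()
  //       .setJid(jid)
  //       .build();          // build() throws if the required jid is unset
  //   byte[] wire = req.toByteArray();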
  
  public interface IsFormattedResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required bool isFormatted = 1;
    boolean hasIsFormatted();
    boolean getIsFormatted();
  }
  public static final class IsFormattedResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements IsFormattedResponseProtoOrBuilder {
    // Use IsFormattedResponseProto.newBuilder() to construct.
    private IsFormattedResponseProto(Builder builder) {
      super(builder);
    }
    private IsFormattedResponseProto(boolean noInit) {}
    
    private static final IsFormattedResponseProto defaultInstance;
    public static IsFormattedResponseProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public IsFormattedResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_IsFormattedResponseProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_IsFormattedResponseProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required bool isFormatted = 1;
    public static final int ISFORMATTED_FIELD_NUMBER = 1;
    private boolean isFormatted_;
    public boolean hasIsFormatted() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public boolean getIsFormatted() {
      return isFormatted_;
    }
    
    private void initFields() {
      isFormatted_ = false;
    }
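    // Tri-state cache for isInitialized(): -1 = not yet computed,
    // 0 = a required field is missing, 1 = fully initialized.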
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasIsFormatted()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeBool(1, isFormatted_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBoolSize(1, isFormatted_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) obj;
      
      boolean result = true;
      result = result && (hasIsFormatted() == other.hasIsFormatted());
      if (hasIsFormatted()) {
        result = result && (getIsFormatted()
            == other.getIsFormatted());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasIsFormatted()) {
        hash = (37 * hash) + ISFORMATTED_FIELD_NUMBER;
        hash = (53 * hash) + hashBoolean(getIsFormatted());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_IsFormattedResponseProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_IsFormattedResponseProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        isFormatted_ = false;
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.isFormatted_ = isFormatted_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()) return this;
        if (other.hasIsFormatted()) {
          setIsFormatted(other.getIsFormatted());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasIsFormatted()) {
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
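            // Wire tag 8 = (field number 1 << 3) | wire type 0 (varint):
            // the required bool isFormatted.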
            case 8: {
              bitField0_ |= 0x00000001;
              isFormatted_ = input.readBool();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required bool isFormatted = 1;
      private boolean isFormatted_ ;
      public boolean hasIsFormatted() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public boolean getIsFormatted() {
        return isFormatted_;
      }
      public Builder setIsFormatted(boolean value) {
        bitField0_ |= 0x00000001;
        isFormatted_ = value;
        onChanged();
        return this;
      }
      public Builder clearIsFormatted() {
        bitField0_ = (bitField0_ & ~0x00000001);
        isFormatted_ = false;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:IsFormattedResponseProto)
    }
    
    static {
      defaultInstance = new IsFormattedResponseProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:IsFormattedResponseProto)
  }
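  
  // Illustrative round trip for IsFormattedResponseProto (a sketch, not part
  // of the generated output):
  //
  //   IsFormattedResponseProto resp = IsFormattedResponseProto.newBuilder()
  //       .setIsFormatted(true)
  //       .build();
  //   IsFormattedResponseProto parsed =
  //       IsFormattedResponseProto.parseFrom(resp.toByteArray());
  //   // parsed.getIsFormatted() == true: the required field survives the
  //   // serialize/parse round trip. parseFrom throws the checked
  //   // InvalidProtocolBufferException on malformed input.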
  
  public interface GetJournalStateRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .JournalIdProto jid = 1;
    boolean hasJid();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
  }
  public static final class GetJournalStateRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements GetJournalStateRequestProtoOrBuilder {
    // Use GetJournalStateRequestProto.newBuilder() to construct.
    private GetJournalStateRequestProto(Builder builder) {
      super(builder);
    }
    private GetJournalStateRequestProto(boolean noInit) {}
    
    private static final GetJournalStateRequestProto defaultInstance;
    public static GetJournalStateRequestProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public GetJournalStateRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetJournalStateRequestProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetJournalStateRequestProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }
    
    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, jid_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) obj;
      
      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetJournalStateRequestProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetJournalStateRequestProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJidFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (jidBuilder_ == null) {
          result.jid_ = jid_;
        } else {
          result.jid_ = jidBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance()) return this;
        if (other.hasJid()) {
          mergeJid(other.getJid());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasJid()) {
          return false;
        }
        if (!getJid().isInitialized()) {
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
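            // Wire tag 10 = (field number 1 << 3) | wire type 2
            // (length-delimited): the embedded JournalIdProto message.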
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder();
              if (hasJid()) {
                subBuilder.mergeFrom(getJid());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setJid(subBuilder.buildPartial());
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required .JournalIdProto jid = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
      public boolean hasJid() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
        if (jidBuilder_ == null) {
          return jid_;
        } else {
          return jidBuilder_.getMessage();
        }
      }
      public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          jid_ = value;
          onChanged();
        } else {
          jidBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setJid(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
        if (jidBuilder_ == null) {
          jid_ = builderForValue.build();
          onChanged();
        } else {
          jidBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
            jid_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
          } else {
            jid_ = value;
          }
          onChanged();
        } else {
          jidBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearJid() {
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
          onChanged();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getJidFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
        if (jidBuilder_ != null) {
          return jidBuilder_.getMessageOrBuilder();
        } else {
          return jid_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
          getJidFieldBuilder() {
        if (jidBuilder_ == null) {
          jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
                  jid_,
                  getParentForChildren(),
                  isClean());
          jid_ = null;
        }
        return jidBuilder_;
      }
      
      // @@protoc_insertion_point(builder_scope:GetJournalStateRequestProto)
    }
    
    static {
      defaultInstance = new GetJournalStateRequestProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:GetJournalStateRequestProto)
  }
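  
  // Illustrative usage of GetJournalStateRequestProto (sketch only; "edits"
  // is an invented journal id). Note the setJid overload that accepts a
  // JournalIdProto.Builder directly:
  //
  //   GetJournalStateRequestProto req = GetJournalStateRequestProto.newBuilder()
  //       .setJid(JournalIdProto.newBuilder().setIdentifier("edits"))
  //       .build();   // isInitialized() checks both jid and its identifier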
  
  public interface GetJournalStateResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required uint64 lastPromisedEpoch = 1;
    boolean hasLastPromisedEpoch();
    long getLastPromisedEpoch();
    
    // required uint32 httpPort = 2;
    boolean hasHttpPort();
    int getHttpPort();
  }
  public static final class GetJournalStateResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements GetJournalStateResponseProtoOrBuilder {
    // Use GetJournalStateResponseProto.newBuilder() to construct.
    private GetJournalStateResponseProto(Builder builder) {
      super(builder);
    }
    private GetJournalStateResponseProto(boolean noInit) {}
    
    private static final GetJournalStateResponseProto defaultInstance;
    public static GetJournalStateResponseProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public GetJournalStateResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetJournalStateResponseProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetJournalStateResponseProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required uint64 lastPromisedEpoch = 1;
    public static final int LASTPROMISEDEPOCH_FIELD_NUMBER = 1;
    private long lastPromisedEpoch_;
    public boolean hasLastPromisedEpoch() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public long getLastPromisedEpoch() {
      return lastPromisedEpoch_;
    }
    
    // required uint32 httpPort = 2;
    public static final int HTTPPORT_FIELD_NUMBER = 2;
    private int httpPort_;
    public boolean hasHttpPort() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public int getHttpPort() {
      return httpPort_;
    }
    
    private void initFields() {
      lastPromisedEpoch_ = 0L;
      httpPort_ = 0;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasLastPromisedEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasHttpPort()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeUInt64(1, lastPromisedEpoch_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt32(2, httpPort_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(1, lastPromisedEpoch_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(2, httpPort_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) obj;
      
      boolean result = true;
      result = result && (hasLastPromisedEpoch() == other.hasLastPromisedEpoch());
      if (hasLastPromisedEpoch()) {
        result = result && (getLastPromisedEpoch()
            == other.getLastPromisedEpoch());
      }
      result = result && (hasHttpPort() == other.hasHttpPort());
      if (hasHttpPort()) {
        result = result && (getHttpPort()
            == other.getHttpPort());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasLastPromisedEpoch()) {
        hash = (37 * hash) + LASTPROMISEDEPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getLastPromisedEpoch());
      }
      if (hasHttpPort()) {
        hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
        hash = (53 * hash) + getHttpPort();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetJournalStateResponseProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetJournalStateResponseProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        lastPromisedEpoch_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000001);
        httpPort_ = 0;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.lastPromisedEpoch_ = lastPromisedEpoch_;
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.httpPort_ = httpPort_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()) return this;
        if (other.hasLastPromisedEpoch()) {
          setLastPromisedEpoch(other.getLastPromisedEpoch());
        }
        if (other.hasHttpPort()) {
          setHttpPort(other.getHttpPort());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasLastPromisedEpoch()) {
          return false;
        }
        if (!hasHttpPort()) {
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
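            // Wire tags 8 and 16 decode fields 1 and 2 as varints:
            // lastPromisedEpoch (uint64) and httpPort (uint32).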
            case 8: {
              bitField0_ |= 0x00000001;
              lastPromisedEpoch_ = input.readUInt64();
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              httpPort_ = input.readUInt32();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required uint64 lastPromisedEpoch = 1;
      private long lastPromisedEpoch_ ;
      public boolean hasLastPromisedEpoch() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public long getLastPromisedEpoch() {
        return lastPromisedEpoch_;
      }
      public Builder setLastPromisedEpoch(long value) {
        bitField0_ |= 0x00000001;
        lastPromisedEpoch_ = value;
        onChanged();
        return this;
      }
      public Builder clearLastPromisedEpoch() {
        bitField0_ = (bitField0_ & ~0x00000001);
        lastPromisedEpoch_ = 0L;
        onChanged();
        return this;
      }
      
      // required uint32 httpPort = 2;
      private int httpPort_ ;
      public boolean hasHttpPort() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public int getHttpPort() {
        return httpPort_;
      }
      public Builder setHttpPort(int value) {
        bitField0_ |= 0x00000002;
        httpPort_ = value;
        onChanged();
        return this;
      }
      public Builder clearHttpPort() {
        bitField0_ = (bitField0_ & ~0x00000002);
        httpPort_ = 0;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:GetJournalStateResponseProto)
    }
    
    static {
      defaultInstance = new GetJournalStateResponseProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:GetJournalStateResponseProto)
  }
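  
  // Illustrative usage of GetJournalStateResponseProto (sketch; 8480 is only
  // an example port value):
  //
  //   GetJournalStateResponseProto resp = GetJournalStateResponseProto.newBuilder()
  //       .setLastPromisedEpoch(1L)
  //       .setHttpPort(8480)
  //       .build();          // throws if either required field is missing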
  
  public interface FormatRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .JournalIdProto jid = 1;
    boolean hasJid();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
    
    // required .NamespaceInfoProto nsInfo = 2;
    boolean hasNsInfo();
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();
  }
  public static final class FormatRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements FormatRequestProtoOrBuilder {
    // Use FormatRequestProto.newBuilder() to construct.
    private FormatRequestProto(Builder builder) {
      super(builder);
    }
    private FormatRequestProto(boolean noInit) {}
    
    private static final FormatRequestProto defaultInstance;
    public static FormatRequestProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public FormatRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FormatRequestProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FormatRequestProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }
    
    // required .NamespaceInfoProto nsInfo = 2;
    public static final int NSINFO_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
    public boolean hasNsInfo() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
      return nsInfo_;
    }
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
      return nsInfo_;
    }
    
    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNsInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getNsInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, nsInfo_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, nsInfo_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) obj;
      
      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result && (hasNsInfo() == other.hasNsInfo());
      if (hasNsInfo()) {
        result = result && getNsInfo()
            .equals(other.getNsInfo());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      if (hasNsInfo()) {
        hash = (37 * hash) + NSINFO_FIELD_NUMBER;
        hash = (53 * hash) + getNsInfo().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
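
    // Illustrative sketch, not emitted by protoc: parseDelimitedFrom returns
    // null once the stream is exhausted, so a sequence of length-prefixed
    // messages can be drained with a simple loop. The helper name and the
    // assumption that the stream holds delimited FormatRequestProto messages
    // are for illustration only.
    private static int countDelimitedRequests(java.io.InputStream in)
        throws java.io.IOException {
      int count = 0;
      // Each call consumes one varint length prefix plus one message body.
      while (parseDelimitedFrom(in) != null) {
        count++;
      }
      return count;
    }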
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FormatRequestProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FormatRequestProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJidFieldBuilder();
          getNsInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        if (nsInfoBuilder_ == null) {
          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
        } else {
          nsInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (jidBuilder_ == null) {
          result.jid_ = jid_;
        } else {
          result.jid_ = jidBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        if (nsInfoBuilder_ == null) {
          result.nsInfo_ = nsInfo_;
        } else {
          result.nsInfo_ = nsInfoBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance()) return this;
        if (other.hasJid()) {
          mergeJid(other.getJid());
        }
        if (other.hasNsInfo()) {
          mergeNsInfo(other.getNsInfo());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasJid()) {
          
          return false;
        }
        if (!hasNsInfo()) {
          
          return false;
        }
        if (!getJid().isInitialized()) {
          
          return false;
        }
        if (!getNsInfo().isInitialized()) {
          
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0: // readTag() returns 0 at the end of the stream
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: { // any other tag is preserved as an unknown field
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 10: { // tag 10 = (1 << 3) | 2: field 1 (jid), length-delimited
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder();
              if (hasJid()) {
                subBuilder.mergeFrom(getJid());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setJid(subBuilder.buildPartial());
              break;
            }
            case 18: { // tag 18 = (2 << 3) | 2: field 2 (nsInfo), length-delimited
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder();
              if (hasNsInfo()) {
                subBuilder.mergeFrom(getNsInfo());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setNsInfo(subBuilder.buildPartial());
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required .JournalIdProto jid = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
      public boolean hasJid() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
        if (jidBuilder_ == null) {
          return jid_;
        } else {
          return jidBuilder_.getMessage();
        }
      }
      public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          jid_ = value;
          onChanged();
        } else {
          jidBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setJid(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
        if (jidBuilder_ == null) {
          jid_ = builderForValue.build();
          onChanged();
        } else {
          jidBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
            jid_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
          } else {
            jid_ = value;
          }
          onChanged();
        } else {
          jidBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearJid() {
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
          onChanged();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getJidFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
        if (jidBuilder_ != null) {
          return jidBuilder_.getMessageOrBuilder();
        } else {
          return jid_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
          getJidFieldBuilder() {
        if (jidBuilder_ == null) {
          jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
                  jid_,
                  getParentForChildren(),
                  isClean());
          jid_ = null;
        }
        return jidBuilder_;
      }
      
      // required .NamespaceInfoProto nsInfo = 2;
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
      public boolean hasNsInfo() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
        if (nsInfoBuilder_ == null) {
          return nsInfo_;
        } else {
          return nsInfoBuilder_.getMessage();
        }
      }
      public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
        if (nsInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          nsInfo_ = value;
          onChanged();
        } else {
          nsInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      public Builder setNsInfo(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
        if (nsInfoBuilder_ == null) {
          nsInfo_ = builderForValue.build();
          onChanged();
        } else {
          nsInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
        if (nsInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000002) == 0x00000002) &&
              nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
            nsInfo_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
          } else {
            nsInfo_ = value;
          }
          onChanged();
        } else {
          nsInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      public Builder clearNsInfo() {
        if (nsInfoBuilder_ == null) {
          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
          onChanged();
        } else {
          nsInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getNsInfoFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
        if (nsInfoBuilder_ != null) {
          return nsInfoBuilder_.getMessageOrBuilder();
        } else {
          return nsInfo_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> 
          getNsInfoFieldBuilder() {
        if (nsInfoBuilder_ == null) {
          nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
                  nsInfo_,
                  getParentForChildren(),
                  isClean());
          nsInfo_ = null;
        }
        return nsInfoBuilder_;
      }
      
      // @@protoc_insertion_point(builder_scope:FormatRequestProto)
    }
    
    static {
      defaultInstance = new FormatRequestProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:FormatRequestProto)
  }
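
  // Illustrative sketch, not emitted by protoc: assembling a
  // FormatRequestProto with the builder API above. The method name and the
  // literal journal id are placeholder assumptions, not values from the
  // original source.
  private static FormatRequestProto buildExampleFormatRequest(
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo) {
    return FormatRequestProto.newBuilder()
        // setJid has an overload that accepts a JournalIdProto.Builder directly.
        .setJid(JournalIdProto.newBuilder().setIdentifier("example-journal"))
        .setNsInfo(nsInfo)
        // build() throws if either required field were left unset.
        .build();
  }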
  
  public interface FormatResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
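  // The interface above declares nothing because the underlying message has
  // no fields; reconstructed, it is simply:
  //
  //   message FormatResponseProto {
  //   }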
  public static final class FormatResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements FormatResponseProtoOrBuilder {
    // Use FormatResponseProto.newBuilder() to construct.
    private FormatResponseProto(Builder builder) {
      super(builder);
    }
    private FormatResponseProto(boolean noInit) {}
    
    private static final FormatResponseProto defaultInstance;
    public static FormatResponseProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public FormatResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FormatResponseProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FormatResponseProto_fieldAccessorTable;
    }
    
    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) obj;
      
      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FormatResponseProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_FormatResponseProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto(this);
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
          }
        }
      }
      
      
      // @@protoc_insertion_point(builder_scope:FormatResponseProto)
    }
    
    static {
      defaultInstance = new FormatResponseProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:FormatResponseProto)
  }
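
  // Illustrative sketch, not emitted by protoc: with no fields of its own, a
  // FormatResponseProto round-trips through its wire form in one line.
  // toByteString() serializes the message (including any unknown fields) and
  // parseFrom re-parses it via the generated builder.
  private static FormatResponseProto roundTripFormatResponse(
      FormatResponseProto response)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return FormatResponseProto.parseFrom(response.toByteString());
  }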
  
  public interface NewEpochRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .JournalIdProto jid = 1;
    boolean hasJid();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
    
    // required .NamespaceInfoProto nsInfo = 2;
    boolean hasNsInfo();
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();
    
    // required uint64 epoch = 3;
    boolean hasEpoch();
    long getEpoch();
  }
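  // Reconstructed from the generated field comments above (proto2 keywords
  // inferred):
  //
  //   message NewEpochRequestProto {
  //     required JournalIdProto jid = 1;
  //     required NamespaceInfoProto nsInfo = 2;
  //     required uint64 epoch = 3;
  //   }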
  public static final class NewEpochRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements NewEpochRequestProtoOrBuilder {
    // Use NewEpochRequestProto.newBuilder() to construct.
    private NewEpochRequestProto(Builder builder) {
      super(builder);
    }
    private NewEpochRequestProto(boolean noInit) {}
    
    private static final NewEpochRequestProto defaultInstance;
    public static NewEpochRequestProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public NewEpochRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_NewEpochRequestProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_NewEpochRequestProto_fieldAccessorTable;
    }
    
    // Presence bits: 0x00000001 = jid, 0x00000002 = nsInfo, 0x00000004 = epoch.
    private int bitField0_;
    // required .JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }
    
    // required .NamespaceInfoProto nsInfo = 2;
    public static final int NSINFO_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
    public boolean hasNsInfo() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
      return nsInfo_;
    }
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
      return nsInfo_;
    }
    
    // required uint64 epoch = 3;
    public static final int EPOCH_FIELD_NUMBER = 3;
    private long epoch_;
    public boolean hasEpoch() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    public long getEpoch() {
      return epoch_;
    }
    
    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
      epoch_ = 0L;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNsInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getNsInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, nsInfo_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, epoch_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, nsInfo_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(3, epoch_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) obj;
      
      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result && (hasNsInfo() == other.hasNsInfo());
      if (hasNsInfo()) {
        result = result && getNsInfo()
            .equals(other.getNsInfo());
      }
      result = result && (hasEpoch() == other.hasEpoch());
      if (hasEpoch()) {
        result = result && (getEpoch()
            == other.getEpoch());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      if (hasNsInfo()) {
        hash = (37 * hash) + NSINFO_FIELD_NUMBER;
        hash = (53 * hash) + getNsInfo().hashCode();
      }
      if (hasEpoch()) {
        hash = (37 * hash) + EPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getEpoch());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_NewEpochRequestProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_NewEpochRequestProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJidFieldBuilder();
          getNsInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        if (nsInfoBuilder_ == null) {
          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
        } else {
          nsInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        epoch_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (jidBuilder_ == null) {
          result.jid_ = jid_;
        } else {
          result.jid_ = jidBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        if (nsInfoBuilder_ == null) {
          result.nsInfo_ = nsInfo_;
        } else {
          result.nsInfo_ = nsInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.epoch_ = epoch_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance()) return this;
        if (other.hasJid()) {
          mergeJid(other.getJid());
        }
        if (other.hasNsInfo()) {
          mergeNsInfo(other.getNsInfo());
        }
        if (other.hasEpoch()) {
          setEpoch(other.getEpoch());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasJid()) {
          
          return false;
        }
        if (!hasNsInfo()) {
          
          return false;
        }
        if (!hasEpoch()) {
          
          return false;
        }
        if (!getJid().isInitialized()) {
          
          return false;
        }
        if (!getNsInfo().isInitialized()) {
          
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder();
              if (hasJid()) {
                subBuilder.mergeFrom(getJid());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setJid(subBuilder.buildPartial());
              break;
            }
            case 18: {
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder();
              if (hasNsInfo()) {
                subBuilder.mergeFrom(getNsInfo());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setNsInfo(subBuilder.buildPartial());
              break;
            }
            case 24: { // tag 24 = (3 << 3) | 0: field 3 (epoch), varint
              bitField0_ |= 0x00000004;
              epoch_ = input.readUInt64();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required .JournalIdProto jid = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
      public boolean hasJid() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
        if (jidBuilder_ == null) {
          return jid_;
        } else {
          return jidBuilder_.getMessage();
        }
      }
      public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          jid_ = value;
          onChanged();
        } else {
          jidBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setJid(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
        if (jidBuilder_ == null) {
          jid_ = builderForValue.build();
          onChanged();
        } else {
          jidBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
            jid_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
          } else {
            jid_ = value;
          }
          onChanged();
        } else {
          jidBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearJid() {
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
          onChanged();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getJidFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
        if (jidBuilder_ != null) {
          return jidBuilder_.getMessageOrBuilder();
        } else {
          return jid_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
          getJidFieldBuilder() {
        if (jidBuilder_ == null) {
          jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
                  jid_,
                  getParentForChildren(),
                  isClean());
          jid_ = null;
        }
        return jidBuilder_;
      }
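      // Editor's note (not generated output): the SingleFieldBuilder above is
      // created lazily. Until getJidFieldBuilder() first runs, the plain jid_
      // message backs the accessors and the set/merge/clear methods mutate it
      // directly; once the builder exists it owns the value and jid_ is set
      // to null, which is why every jid accessor branches on
      // jidBuilder_ == null.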
      
      // required .NamespaceInfoProto nsInfo = 2;
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
      public boolean hasNsInfo() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
        if (nsInfoBuilder_ == null) {
          return nsInfo_;
        } else {
          return nsInfoBuilder_.getMessage();
        }
      }
      public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
        if (nsInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          nsInfo_ = value;
          onChanged();
        } else {
          nsInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      public Builder setNsInfo(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
        if (nsInfoBuilder_ == null) {
          nsInfo_ = builderForValue.build();
          onChanged();
        } else {
          nsInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
        if (nsInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000002) == 0x00000002) &&
              nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
            nsInfo_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
          } else {
            nsInfo_ = value;
          }
          onChanged();
        } else {
          nsInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      public Builder clearNsInfo() {
        if (nsInfoBuilder_ == null) {
          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
          onChanged();
        } else {
          nsInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getNsInfoFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
        if (nsInfoBuilder_ != null) {
          return nsInfoBuilder_.getMessageOrBuilder();
        } else {
          return nsInfo_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> 
          getNsInfoFieldBuilder() {
        if (nsInfoBuilder_ == null) {
          nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
                  nsInfo_,
                  getParentForChildren(),
                  isClean());
          nsInfo_ = null;
        }
        return nsInfoBuilder_;
      }
      
      // required uint64 epoch = 3;
      private long epoch_ ;
      public boolean hasEpoch() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      public long getEpoch() {
        return epoch_;
      }
      public Builder setEpoch(long value) {
        bitField0_ |= 0x00000004;
        epoch_ = value;
        onChanged();
        return this;
      }
      public Builder clearEpoch() {
        bitField0_ = (bitField0_ & ~0x00000004);
        epoch_ = 0L;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:NewEpochRequestProto)
    }
    
    static {
      defaultInstance = new NewEpochRequestProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:NewEpochRequestProto)
  }
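  
  // Editor's note (not generated output): a minimal sketch of constructing a
  // NewEpochRequestProto from the builder API above; the "myjournal"
  // identifier and the nsInfo variable are illustrative assumptions, not
  // values taken from this file.
  //
  //   NewEpochRequestProto req = NewEpochRequestProto.newBuilder()
  //       .setJid(JournalIdProto.newBuilder().setIdentifier("myjournal"))
  //       .setNsInfo(nsInfo)   // an already-built NamespaceInfoProto
  //       .setEpoch(42L)
  //       .build();            // build() throws if a required field is unset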
  
  public interface NewEpochResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // optional uint64 lastSegmentTxId = 1;
    boolean hasLastSegmentTxId();
    long getLastSegmentTxId();
  }
  public static final class NewEpochResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements NewEpochResponseProtoOrBuilder {
    // Use NewEpochResponseProto.newBuilder() to construct.
    private NewEpochResponseProto(Builder builder) {
      super(builder);
    }
    private NewEpochResponseProto(boolean noInit) {}
    
    private static final NewEpochResponseProto defaultInstance;
    public static NewEpochResponseProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public NewEpochResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_NewEpochResponseProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_NewEpochResponseProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // optional uint64 lastSegmentTxId = 1;
    public static final int LASTSEGMENTTXID_FIELD_NUMBER = 1;
    private long lastSegmentTxId_;
    public boolean hasLastSegmentTxId() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public long getLastSegmentTxId() {
      return lastSegmentTxId_;
    }
    
    private void initFields() {
      lastSegmentTxId_ = 0L;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeUInt64(1, lastSegmentTxId_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(1, lastSegmentTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) obj;
      
      boolean result = true;
      result = result && (hasLastSegmentTxId() == other.hasLastSegmentTxId());
      if (hasLastSegmentTxId()) {
        result = result && (getLastSegmentTxId()
            == other.getLastSegmentTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasLastSegmentTxId()) {
        hash = (37 * hash) + LASTSEGMENTTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getLastSegmentTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
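    // Editor's note (not generated output): unlike the parseFrom overloads,
    // the two parseDelimitedFrom methods above return null when
    // mergeDelimitedFrom signals end-of-stream, so a caller reading a
    // sequence of length-delimited messages should loop until null is
    // returned.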
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_NewEpochResponseProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_NewEpochResponseProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        lastSegmentTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.lastSegmentTxId_ = lastSegmentTxId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()) return this;
        if (other.hasLastSegmentTxId()) {
          setLastSegmentTxId(other.getLastSegmentTxId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 8: {
              bitField0_ |= 0x00000001;
              lastSegmentTxId_ = input.readUInt64();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // optional uint64 lastSegmentTxId = 1;
      private long lastSegmentTxId_ ;
      public boolean hasLastSegmentTxId() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public long getLastSegmentTxId() {
        return lastSegmentTxId_;
      }
      public Builder setLastSegmentTxId(long value) {
        bitField0_ |= 0x00000001;
        lastSegmentTxId_ = value;
        onChanged();
        return this;
      }
      public Builder clearLastSegmentTxId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        lastSegmentTxId_ = 0L;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:NewEpochResponseProto)
    }
    
    static {
      defaultInstance = new NewEpochResponseProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:NewEpochResponseProto)
  }
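  
  // Editor's note (not generated output): NewEpochResponseProto declares only
  // the optional lastSegmentTxId field, so isInitialized() is always true and
  // an empty message is valid. A hedged parsing sketch, where `bytes` stands
  // in for serialized data received off the wire:
  //
  //   NewEpochResponseProto resp = NewEpochResponseProto.parseFrom(bytes);
  //   long lastTxId = resp.hasLastSegmentTxId()
  //       ? resp.getLastSegmentTxId()
  //       : -1L;  // field absent; choose an application-level sentinel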
  
  public interface GetEditLogManifestRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .JournalIdProto jid = 1;
    boolean hasJid();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
    
    // required uint64 sinceTxId = 2;
    boolean hasSinceTxId();
    long getSinceTxId();
    
    // optional bool inProgressOk = 4 [default = false];
    boolean hasInProgressOk();
    boolean getInProgressOk();
  }
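  // Editor's note (not generated output): the declared field numbers in this
  // message jump from sinceTxId = 2 to inProgressOk = 4; field 3 is simply
  // unused in the .proto definition, and a varint with tag 24 (field 3) would
  // fall through to the unknown-field handling in the builder's mergeFrom.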
  public static final class GetEditLogManifestRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements GetEditLogManifestRequestProtoOrBuilder {
    // Use GetEditLogManifestRequestProto.newBuilder() to construct.
    private GetEditLogManifestRequestProto(Builder builder) {
      super(builder);
    }
    private GetEditLogManifestRequestProto(boolean noInit) {}
    
    private static final GetEditLogManifestRequestProto defaultInstance;
    public static GetEditLogManifestRequestProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public GetEditLogManifestRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetEditLogManifestRequestProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetEditLogManifestRequestProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }
    
    // required uint64 sinceTxId = 2;
    public static final int SINCETXID_FIELD_NUMBER = 2;
    private long sinceTxId_;
    public boolean hasSinceTxId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public long getSinceTxId() {
      return sinceTxId_;
    }
    
    // optional bool inProgressOk = 4 [default = false];
    public static final int INPROGRESSOK_FIELD_NUMBER = 4;
    private boolean inProgressOk_;
    public boolean hasInProgressOk() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    public boolean getInProgressOk() {
      return inProgressOk_;
    }
    
    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      sinceTxId_ = 0L;
      inProgressOk_ = false;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSinceTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, sinceTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeBool(4, inProgressOk_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, sinceTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBoolSize(4, inProgressOk_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) obj;
      
      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result && (hasSinceTxId() == other.hasSinceTxId());
      if (hasSinceTxId()) {
        result = result && (getSinceTxId()
            == other.getSinceTxId());
      }
      result = result && (hasInProgressOk() == other.hasInProgressOk());
      if (hasInProgressOk()) {
        result = result && (getInProgressOk()
            == other.getInProgressOk());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      if (hasSinceTxId()) {
        hash = (37 * hash) + SINCETXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getSinceTxId());
      }
      if (hasInProgressOk()) {
        hash = (37 * hash) + INPROGRESSOK_FIELD_NUMBER;
        hash = (53 * hash) + hashBoolean(getInProgressOk());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetEditLogManifestRequestProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetEditLogManifestRequestProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJidFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        sinceTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        inProgressOk_ = false;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (jidBuilder_ == null) {
          result.jid_ = jid_;
        } else {
          result.jid_ = jidBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.sinceTxId_ = sinceTxId_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.inProgressOk_ = inProgressOk_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance()) return this;
        if (other.hasJid()) {
          mergeJid(other.getJid());
        }
        if (other.hasSinceTxId()) {
          setSinceTxId(other.getSinceTxId());
        }
        if (other.hasInProgressOk()) {
          setInProgressOk(other.getInProgressOk());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasJid()) {
          return false;
        }
        if (!hasSinceTxId()) {
          return false;
        }
        if (!getJid().isInitialized()) {
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder();
              if (hasJid()) {
                subBuilder.mergeFrom(getJid());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setJid(subBuilder.buildPartial());
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              sinceTxId_ = input.readUInt64();
              break;
            }
            case 32: {
              bitField0_ |= 0x00000004;
              inProgressOk_ = input.readBool();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required .JournalIdProto jid = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
      public boolean hasJid() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
        if (jidBuilder_ == null) {
          return jid_;
        } else {
          return jidBuilder_.getMessage();
        }
      }
      public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          jid_ = value;
          onChanged();
        } else {
          jidBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setJid(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
        if (jidBuilder_ == null) {
          jid_ = builderForValue.build();
          onChanged();
        } else {
          jidBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
            jid_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
          } else {
            jid_ = value;
          }
          onChanged();
        } else {
          jidBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearJid() {
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
          onChanged();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getJidFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
        if (jidBuilder_ != null) {
          return jidBuilder_.getMessageOrBuilder();
        } else {
          return jid_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
          getJidFieldBuilder() {
        if (jidBuilder_ == null) {
          jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
                  jid_,
                  getParentForChildren(),
                  isClean());
          jid_ = null;
        }
        return jidBuilder_;
      }
      
      // required uint64 sinceTxId = 2;
      private long sinceTxId_ ;
      public boolean hasSinceTxId() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public long getSinceTxId() {
        return sinceTxId_;
      }
      public Builder setSinceTxId(long value) {
        bitField0_ |= 0x00000002;
        sinceTxId_ = value;
        onChanged();
        return this;
      }
      public Builder clearSinceTxId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        sinceTxId_ = 0L;
        onChanged();
        return this;
      }
      
      // optional bool inProgressOk = 4 [default = false];
      private boolean inProgressOk_ ;
      public boolean hasInProgressOk() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      public boolean getInProgressOk() {
        return inProgressOk_;
      }
      public Builder setInProgressOk(boolean value) {
        bitField0_ |= 0x00000004;
        inProgressOk_ = value;
        onChanged();
        return this;
      }
      public Builder clearInProgressOk() {
        bitField0_ = (bitField0_ & ~0x00000004);
        inProgressOk_ = false;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:GetEditLogManifestRequestProto)
    }
    
    static {
      defaultInstance = new GetEditLogManifestRequestProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:GetEditLogManifestRequestProto)
  }
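  
  // Editor's note (not generated output): a minimal request-building sketch
  // using the setters above; the journal identifier and transaction id are
  // illustrative assumptions only.
  //
  //   GetEditLogManifestRequestProto req =
  //       GetEditLogManifestRequestProto.newBuilder()
  //           .setJid(JournalIdProto.newBuilder().setIdentifier("myjournal"))
  //           .setSinceTxId(1000L)
  //           .setInProgressOk(true)  // optional; defaults to false
  //           .build();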
  
  public interface GetEditLogManifestResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .RemoteEditLogManifestProto manifest = 1;
    boolean hasManifest();
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest();
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder();
    
    // required uint32 httpPort = 2;
    boolean hasHttpPort();
    int getHttpPort();
  }
  public static final class GetEditLogManifestResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements GetEditLogManifestResponseProtoOrBuilder {
    // Use GetEditLogManifestResponseProto.newBuilder() to construct.
    private GetEditLogManifestResponseProto(Builder builder) {
      super(builder);
    }
    private GetEditLogManifestResponseProto(boolean noInit) {}
    
    private static final GetEditLogManifestResponseProto defaultInstance;
    public static GetEditLogManifestResponseProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public GetEditLogManifestResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetEditLogManifestResponseProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetEditLogManifestResponseProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .RemoteEditLogManifestProto manifest = 1;
    public static final int MANIFEST_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_;
    public boolean hasManifest() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
      return manifest_;
    }
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
      return manifest_;
    }
    
    // required uint32 httpPort = 2;
    public static final int HTTPPORT_FIELD_NUMBER = 2;
    private int httpPort_;
    public boolean hasHttpPort() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public int getHttpPort() {
      return httpPort_;
    }
    
    private void initFields() {
      manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
      httpPort_ = 0;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasManifest()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasHttpPort()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getManifest().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, manifest_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt32(2, httpPort_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, manifest_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(2, httpPort_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) obj;
      
      boolean result = true;
      result = result && (hasManifest() == other.hasManifest());
      if (hasManifest()) {
        result = result && getManifest()
            .equals(other.getManifest());
      }
      result = result && (hasHttpPort() == other.hasHttpPort());
      if (hasHttpPort()) {
        result = result && (getHttpPort()
            == other.getHttpPort());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasManifest()) {
        hash = (37 * hash) + MANIFEST_FIELD_NUMBER;
        hash = (53 * hash) + getManifest().hashCode();
      }
      if (hasHttpPort()) {
        hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
        hash = (53 * hash) + getHttpPort();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetEditLogManifestResponseProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_GetEditLogManifestResponseProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getManifestFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (manifestBuilder_ == null) {
          manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
        } else {
          manifestBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        httpPort_ = 0;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (manifestBuilder_ == null) {
          result.manifest_ = manifest_;
        } else {
          result.manifest_ = manifestBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.httpPort_ = httpPort_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()) return this;
        if (other.hasManifest()) {
          mergeManifest(other.getManifest());
        }
        if (other.hasHttpPort()) {
          setHttpPort(other.getHttpPort());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasManifest()) {
          // required field "manifest" is unset
          return false;
        }
        if (!hasHttpPort()) {
          // required field "httpPort" is unset
          return false;
        }
        if (!getManifest().isInitialized()) {
          // a required sub-field of "manifest" is unset
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder();
              if (hasManifest()) {
                subBuilder.mergeFrom(getManifest());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setManifest(subBuilder.buildPartial());
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              httpPort_ = input.readUInt32();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required .RemoteEditLogManifestProto manifest = 1;
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> manifestBuilder_;
      public boolean hasManifest() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
        if (manifestBuilder_ == null) {
          return manifest_;
        } else {
          return manifestBuilder_.getMessage();
        }
      }
      public Builder setManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
        if (manifestBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          manifest_ = value;
          onChanged();
        } else {
          manifestBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setManifest(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder builderForValue) {
        if (manifestBuilder_ == null) {
          manifest_ = builderForValue.build();
          onChanged();
        } else {
          manifestBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
        if (manifestBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              manifest_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance()) {
            manifest_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder(manifest_).mergeFrom(value).buildPartial();
          } else {
            manifest_ = value;
          }
          onChanged();
        } else {
          manifestBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearManifest() {
        if (manifestBuilder_ == null) {
          manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
          onChanged();
        } else {
          manifestBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder getManifestBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getManifestFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
        if (manifestBuilder_ != null) {
          return manifestBuilder_.getMessageOrBuilder();
        } else {
          return manifest_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> 
          getManifestFieldBuilder() {
        if (manifestBuilder_ == null) {
          manifestBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder>(
                  manifest_,
                  getParentForChildren(),
                  isClean());
          manifest_ = null;
        }
        return manifestBuilder_;
      }
      
      // required uint32 httpPort = 2;
      private int httpPort_;
      public boolean hasHttpPort() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public int getHttpPort() {
        return httpPort_;
      }
      public Builder setHttpPort(int value) {
        bitField0_ |= 0x00000002;
        httpPort_ = value;
        onChanged();
        return this;
      }
      public Builder clearHttpPort() {
        bitField0_ = (bitField0_ & ~0x00000002);
        httpPort_ = 0;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:GetEditLogManifestResponseProto)
    }
    
    static {
      defaultInstance = new GetEditLogManifestResponseProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:GetEditLogManifestResponseProto)
  }
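  
  // Illustrative sketch (not part of the generated code): round-trips a
  // GetEditLogManifestResponseProto through its wire format using only the API
  // above. The manifest and port values are hypothetical placeholders.
  private static GetEditLogManifestResponseProto exampleManifestResponseRoundTrip()
      throws com.google.protobuf.InvalidProtocolBufferException {
    GetEditLogManifestResponseProto response = GetEditLogManifestResponseProto.newBuilder()
        .setManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos
            .RemoteEditLogManifestProto.getDefaultInstance()) // empty manifest, for illustration
        .setHttpPort(8480) // hypothetical JournalNode HTTP port
        .build(); // would throw if a required field were left unset
    byte[] wire = response.toByteArray();
    return GetEditLogManifestResponseProto.parseFrom(wire);
  }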
  
  public interface PrepareRecoveryRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .RequestInfoProto reqInfo = 1;
    boolean hasReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
    
    // required uint64 segmentTxId = 2;
    boolean hasSegmentTxId();
    long getSegmentTxId();
  }
  public static final class PrepareRecoveryRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements PrepareRecoveryRequestProtoOrBuilder {
    // Use PrepareRecoveryRequestProto.newBuilder() to construct.
    private PrepareRecoveryRequestProto(Builder builder) {
      super(builder);
    }
    private PrepareRecoveryRequestProto(boolean noInit) {}
    
    private static final PrepareRecoveryRequestProto defaultInstance;
    public static PrepareRecoveryRequestProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public PrepareRecoveryRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PrepareRecoveryRequestProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PrepareRecoveryRequestProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }
    
    // required uint64 segmentTxId = 2;
    public static final int SEGMENTTXID_FIELD_NUMBER = 2;
    private long segmentTxId_;
    public boolean hasSegmentTxId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public long getSegmentTxId() {
      return segmentTxId_;
    }
    
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      segmentTxId_ = 0L;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSegmentTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, segmentTxId_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, segmentTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) obj;
      
      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasSegmentTxId() == other.hasSegmentTxId());
      if (hasSegmentTxId()) {
        result = result && (getSegmentTxId()
            == other.getSegmentTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasSegmentTxId()) {
        hash = (37 * hash) + SEGMENTTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getSegmentTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PrepareRecoveryRequestProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PrepareRecoveryRequestProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        segmentTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.segmentTxId_ = segmentTxId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        if (other.hasSegmentTxId()) {
          setSegmentTxId(other.getSegmentTxId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          // required field "reqInfo" is unset
          return false;
        }
        if (!hasSegmentTxId()) {
          // required field "segmentTxId" is unset
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          // a required sub-field of "reqInfo" is unset
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
              if (hasReqInfo()) {
                subBuilder.mergeFrom(getReqInfo());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setReqInfo(subBuilder.buildPartial());
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              segmentTxId_ = input.readUInt64();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required .RequestInfoProto reqInfo = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }
      
      // required uint64 segmentTxId = 2;
      private long segmentTxId_;
      public boolean hasSegmentTxId() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public long getSegmentTxId() {
        return segmentTxId_;
      }
      public Builder setSegmentTxId(long value) {
        bitField0_ |= 0x00000002;
        segmentTxId_ = value;
        onChanged();
        return this;
      }
      public Builder clearSegmentTxId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        segmentTxId_ = 0L;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:PrepareRecoveryRequestProto)
    }
    
    static {
      defaultInstance = new PrepareRecoveryRequestProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:PrepareRecoveryRequestProto)
  }
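  
  // Illustrative sketch (not part of the generated code): buildPartial skips the
  // required-field check that build enforces, so a half-populated request can be
  // inspected before reqInfo is filled in. The segment txid here is hypothetical.
  private static boolean examplePrepareRecoveryRequestIsComplete() {
    PrepareRecoveryRequestProto partial = PrepareRecoveryRequestProto.newBuilder()
        .setSegmentTxId(1L)  // hypothetical first txid of the segment under recovery
        .buildPartial();     // required reqInfo is still unset
    return partial.isInitialized(); // false until reqInfo is populated too
  }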
  
  public interface PrepareRecoveryResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // optional .SegmentStateProto segmentState = 1;
    boolean hasSegmentState();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();
    
    // optional uint64 acceptedInEpoch = 2;
    boolean hasAcceptedInEpoch();
    long getAcceptedInEpoch();
    
    // required uint64 lastWriterEpoch = 3;
    boolean hasLastWriterEpoch();
    long getLastWriterEpoch();
    
    // optional uint64 lastCommittedTxId = 4;
    boolean hasLastCommittedTxId();
    long getLastCommittedTxId();
  }
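  
  // Illustrative sketch (not part of the generated code): optional fields report
  // presence via hasX() and otherwise fall back to their proto defaults, so an
  // explicit 0 and "unset" can be told apart; lastWriterEpoch is the only
  // required field of PrepareRecoveryResponseProto.
  private static long exampleLastCommittedTxIdOrDefault(PrepareRecoveryResponseProto response) {
    // getLastCommittedTxId() already returns 0L when unset; the hasX() guard is
    // spelled out to make the presence check explicit.
    return response.hasLastCommittedTxId() ? response.getLastCommittedTxId() : 0L;
  }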
  public static final class PrepareRecoveryResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements PrepareRecoveryResponseProtoOrBuilder {
    // Use PrepareRecoveryResponseProto.newBuilder() to construct.
    private PrepareRecoveryResponseProto(Builder builder) {
      super(builder);
    }
    private PrepareRecoveryResponseProto(boolean noInit) {}
    
    private static final PrepareRecoveryResponseProto defaultInstance;
    public static PrepareRecoveryResponseProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public PrepareRecoveryResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PrepareRecoveryResponseProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PrepareRecoveryResponseProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // optional .SegmentStateProto segmentState = 1;
    public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
    public boolean hasSegmentState() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
      return segmentState_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
      return segmentState_;
    }
    
    // optional uint64 acceptedInEpoch = 2;
    public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
    private long acceptedInEpoch_;
    public boolean hasAcceptedInEpoch() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public long getAcceptedInEpoch() {
      return acceptedInEpoch_;
    }
    
    // required uint64 lastWriterEpoch = 3;
    public static final int LASTWRITEREPOCH_FIELD_NUMBER = 3;
    private long lastWriterEpoch_;
    public boolean hasLastWriterEpoch() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    public long getLastWriterEpoch() {
      return lastWriterEpoch_;
    }
    
    // optional uint64 lastCommittedTxId = 4;
    public static final int LASTCOMMITTEDTXID_FIELD_NUMBER = 4;
    private long lastCommittedTxId_;
    public boolean hasLastCommittedTxId() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    public long getLastCommittedTxId() {
      return lastCommittedTxId_;
    }
    
    private void initFields() {
      segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      acceptedInEpoch_ = 0L;
      lastWriterEpoch_ = 0L;
      lastCommittedTxId_ = 0L;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasLastWriterEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasSegmentState()) {
        if (!getSegmentState().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, segmentState_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, acceptedInEpoch_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, lastWriterEpoch_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeUInt64(4, lastCommittedTxId_);
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, segmentState_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, acceptedInEpoch_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(3, lastWriterEpoch_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(4, lastCommittedTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) obj;
      
      boolean result = true;
      result = result && (hasSegmentState() == other.hasSegmentState());
      if (hasSegmentState()) {
        result = result && getSegmentState()
            .equals(other.getSegmentState());
      }
      result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
      if (hasAcceptedInEpoch()) {
        result = result && (getAcceptedInEpoch()
            == other.getAcceptedInEpoch());
      }
      result = result && (hasLastWriterEpoch() == other.hasLastWriterEpoch());
      if (hasLastWriterEpoch()) {
        result = result && (getLastWriterEpoch()
            == other.getLastWriterEpoch());
      }
      result = result && (hasLastCommittedTxId() == other.hasLastCommittedTxId());
      if (hasLastCommittedTxId()) {
        result = result && (getLastCommittedTxId()
            == other.getLastCommittedTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasSegmentState()) {
        hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
        hash = (53 * hash) + getSegmentState().hashCode();
      }
      if (hasAcceptedInEpoch()) {
        hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getAcceptedInEpoch());
      }
      if (hasLastWriterEpoch()) {
        hash = (37 * hash) + LASTWRITEREPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getLastWriterEpoch());
      }
      if (hasLastCommittedTxId()) {
        hash = (37 * hash) + LASTCOMMITTEDTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getLastCommittedTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
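    
    // Illustrative sketch (not part of the generated code): equals/hashCode above
    // give field-wise value semantics, so structurally identical responses compare
    // equal and can key a HashMap. The epoch value is hypothetical.
    private static boolean exampleValueEquality() {
      PrepareRecoveryResponseProto a =
          PrepareRecoveryResponseProto.newBuilder().setLastWriterEpoch(3L).build();
      PrepareRecoveryResponseProto b =
          PrepareRecoveryResponseProto.newBuilder().setLastWriterEpoch(3L).build();
      return a.equals(b) && a.hashCode() == b.hashCode(); // both conditions hold
    }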
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PrepareRecoveryResponseProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_PrepareRecoveryResponseProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getSegmentStateFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (segmentStateBuilder_ == null) {
          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
        } else {
          segmentStateBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        acceptedInEpoch_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        lastWriterEpoch_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000004);
        lastCommittedTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000008);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (segmentStateBuilder_ == null) {
          result.segmentState_ = segmentState_;
        } else {
          result.segmentState_ = segmentStateBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.acceptedInEpoch_ = acceptedInEpoch_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.lastWriterEpoch_ = lastWriterEpoch_;
        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
          to_bitField0_ |= 0x00000008;
        }
        result.lastCommittedTxId_ = lastCommittedTxId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()) return this;
        if (other.hasSegmentState()) {
          mergeSegmentState(other.getSegmentState());
        }
        if (other.hasAcceptedInEpoch()) {
          setAcceptedInEpoch(other.getAcceptedInEpoch());
        }
        if (other.hasLastWriterEpoch()) {
          setLastWriterEpoch(other.getLastWriterEpoch());
        }
        if (other.hasLastCommittedTxId()) {
          setLastCommittedTxId(other.getLastCommittedTxId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasLastWriterEpoch()) {
          // required field "lastWriterEpoch" is unset
          return false;
        }
        if (hasSegmentState()) {
          if (!getSegmentState().isInitialized()) {
            // a required sub-field of "segmentState" is unset
            return false;
          }
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder();
              if (hasSegmentState()) {
                subBuilder.mergeFrom(getSegmentState());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setSegmentState(subBuilder.buildPartial());
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              acceptedInEpoch_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              lastWriterEpoch_ = input.readUInt64();
              break;
            }
            case 32: {
              bitField0_ |= 0x00000008;
              lastCommittedTxId_ = input.readUInt64();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // optional .SegmentStateProto segmentState = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
      public boolean hasSegmentState() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
        if (segmentStateBuilder_ == null) {
          return segmentState_;
        } else {
          return segmentStateBuilder_.getMessage();
        }
      }
      public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (segmentStateBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          segmentState_ = value;
          onChanged();
        } else {
          segmentStateBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setSegmentState(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
        if (segmentStateBuilder_ == null) {
          segmentState_ = builderForValue.build();
          onChanged();
        } else {
          segmentStateBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (segmentStateBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
            segmentState_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
          } else {
            segmentState_ = value;
          }
          onChanged();
        } else {
          segmentStateBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearSegmentState() {
        if (segmentStateBuilder_ == null) {
          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
          onChanged();
        } else {
          segmentStateBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getSegmentStateFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
        if (segmentStateBuilder_ != null) {
          return segmentStateBuilder_.getMessageOrBuilder();
        } else {
          return segmentState_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
          getSegmentStateFieldBuilder() {
        if (segmentStateBuilder_ == null) {
          segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
                  segmentState_,
                  getParentForChildren(),
                  isClean());
          segmentState_ = null;
        }
        return segmentStateBuilder_;
      }
      
      // optional uint64 acceptedInEpoch = 2;
      private long acceptedInEpoch_ ;
      public boolean hasAcceptedInEpoch() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public long getAcceptedInEpoch() {
        return acceptedInEpoch_;
      }
      public Builder setAcceptedInEpoch(long value) {
        bitField0_ |= 0x00000002;
        acceptedInEpoch_ = value;
        onChanged();
        return this;
      }
      public Builder clearAcceptedInEpoch() {
        bitField0_ = (bitField0_ & ~0x00000002);
        acceptedInEpoch_ = 0L;
        onChanged();
        return this;
      }
      
      // required uint64 lastWriterEpoch = 3;
      private long lastWriterEpoch_ ;
      public boolean hasLastWriterEpoch() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      public long getLastWriterEpoch() {
        return lastWriterEpoch_;
      }
      public Builder setLastWriterEpoch(long value) {
        bitField0_ |= 0x00000004;
        lastWriterEpoch_ = value;
        onChanged();
        return this;
      }
      public Builder clearLastWriterEpoch() {
        bitField0_ = (bitField0_ & ~0x00000004);
        lastWriterEpoch_ = 0L;
        onChanged();
        return this;
      }
      
      // optional uint64 lastCommittedTxId = 4;
      private long lastCommittedTxId_ ;
      public boolean hasLastCommittedTxId() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      public long getLastCommittedTxId() {
        return lastCommittedTxId_;
      }
      public Builder setLastCommittedTxId(long value) {
        bitField0_ |= 0x00000008;
        lastCommittedTxId_ = value;
        onChanged();
        return this;
      }
      public Builder clearLastCommittedTxId() {
        bitField0_ = (bitField0_ & ~0x00000008);
        lastCommittedTxId_ = 0L;
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:PrepareRecoveryResponseProto)
    }
    
    static {
      defaultInstance = new PrepareRecoveryResponseProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:PrepareRecoveryResponseProto)
  }
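  
  // Editor's usage sketch (not part of the generated file): building a
  // PrepareRecoveryResponseProto with the generated builder. Only
  // lastWriterEpoch is required; the epoch/txid values here are hypothetical.
  private static PrepareRecoveryResponseProto examplePrepareRecoveryResponse()
      throws com.google.protobuf.InvalidProtocolBufferException {
    PrepareRecoveryResponseProto resp = PrepareRecoveryResponseProto.newBuilder()
        .setLastWriterEpoch(3L)      // required uint64; build() throws if unset
        .setAcceptedInEpoch(4L)      // optional uint64
        .setLastCommittedTxId(150L)  // optional uint64
        .build();
    // Round-trip through bytes; parseFrom re-validates the required field.
    return PrepareRecoveryResponseProto.parseFrom(resp.toByteString());
  }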
  
  public interface AcceptRecoveryRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required .RequestInfoProto reqInfo = 1;
    boolean hasReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
    
    // required .SegmentStateProto stateToAccept = 2;
    boolean hasStateToAccept();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept();
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder();
    
    // required string fromURL = 3;
    boolean hasFromURL();
    String getFromURL();
  }
  public static final class AcceptRecoveryRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements AcceptRecoveryRequestProtoOrBuilder {
    // Use AcceptRecoveryRequestProto.newBuilder() to construct.
    private AcceptRecoveryRequestProto(Builder builder) {
      super(builder);
    }
    private AcceptRecoveryRequestProto(boolean noInit) {}
    
    private static final AcceptRecoveryRequestProto defaultInstance;
    public static AcceptRecoveryRequestProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public AcceptRecoveryRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_AcceptRecoveryRequestProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_AcceptRecoveryRequestProto_fieldAccessorTable;
    }
    
    private int bitField0_;
    // required .RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }
    
    // required .SegmentStateProto stateToAccept = 2;
    public static final int STATETOACCEPT_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_;
    public boolean hasStateToAccept() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
      return stateToAccept_;
    }
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
      return stateToAccept_;
    }
    
    // required string fromURL = 3;
    public static final int FROMURL_FIELD_NUMBER = 3;
    private java.lang.Object fromURL_;
    public boolean hasFromURL() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    public String getFromURL() {
      java.lang.Object ref = fromURL_;
      if (ref instanceof String) {
        return (String) ref;
      } else {
        com.google.protobuf.ByteString bs = 
            (com.google.protobuf.ByteString) ref;
        String s = bs.toStringUtf8();
        if (com.google.protobuf.Internal.isValidUtf8(bs)) {
          fromURL_ = s;
        }
        return s;
      }
    }
    private com.google.protobuf.ByteString getFromURLBytes() {
      java.lang.Object ref = fromURL_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b = 
            com.google.protobuf.ByteString.copyFromUtf8((String) ref);
        fromURL_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      fromURL_ = "";
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasStateToAccept()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasFromURL()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getStateToAccept().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, stateToAccept_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeBytes(3, getFromURLBytes());
      }
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, stateToAccept_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(3, getFromURLBytes());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) obj;
      
      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasStateToAccept() == other.hasStateToAccept());
      if (hasStateToAccept()) {
        result = result && getStateToAccept()
            .equals(other.getStateToAccept());
      }
      result = result && (hasFromURL() == other.hasFromURL());
      if (hasFromURL()) {
        result = result && getFromURL()
            .equals(other.getFromURL());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasStateToAccept()) {
        hash = (37 * hash) + STATETOACCEPT_FIELD_NUMBER;
        hash = (53 * hash) + getStateToAccept().hashCode();
      }
      if (hasFromURL()) {
        hash = (37 * hash) + FROMURL_FIELD_NUMBER;
        hash = (53 * hash) + getFromURL().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_AcceptRecoveryRequestProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_AcceptRecoveryRequestProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
          getStateToAcceptFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        if (stateToAcceptBuilder_ == null) {
          stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
        } else {
          stateToAcceptBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        fromURL_ = "";
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        if (stateToAcceptBuilder_ == null) {
          result.stateToAccept_ = stateToAccept_;
        } else {
          result.stateToAccept_ = stateToAcceptBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.fromURL_ = fromURL_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        if (other.hasStateToAccept()) {
          mergeStateToAccept(other.getStateToAccept());
        }
        if (other.hasFromURL()) {
          setFromURL(other.getFromURL());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          
          return false;
        }
        if (!hasStateToAccept()) {
          
          return false;
        }
        if (!hasFromURL()) {
          
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          
          return false;
        }
        if (!getStateToAccept().isInitialized()) {
          
          return false;
        }
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
              if (hasReqInfo()) {
                subBuilder.mergeFrom(getReqInfo());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setReqInfo(subBuilder.buildPartial());
              break;
            }
            case 18: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder();
              if (hasStateToAccept()) {
                subBuilder.mergeFrom(getStateToAccept());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setStateToAccept(subBuilder.buildPartial());
              break;
            }
            case 26: {
              bitField0_ |= 0x00000004;
              fromURL_ = input.readBytes();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required .RequestInfoProto reqInfo = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }
      
      // required .SegmentStateProto stateToAccept = 2;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> stateToAcceptBuilder_;
      public boolean hasStateToAccept() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
        if (stateToAcceptBuilder_ == null) {
          return stateToAccept_;
        } else {
          return stateToAcceptBuilder_.getMessage();
        }
      }
      public Builder setStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (stateToAcceptBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          stateToAccept_ = value;
          onChanged();
        } else {
          stateToAcceptBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      public Builder setStateToAccept(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
        if (stateToAcceptBuilder_ == null) {
          stateToAccept_ = builderForValue.build();
          onChanged();
        } else {
          stateToAcceptBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      public Builder mergeStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (stateToAcceptBuilder_ == null) {
          if (((bitField0_ & 0x00000002) == 0x00000002) &&
              stateToAccept_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
            stateToAccept_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(stateToAccept_).mergeFrom(value).buildPartial();
          } else {
            stateToAccept_ = value;
          }
          onChanged();
        } else {
          stateToAcceptBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      public Builder clearStateToAccept() {
        if (stateToAcceptBuilder_ == null) {
          stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
          onChanged();
        } else {
          stateToAcceptBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getStateToAcceptBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getStateToAcceptFieldBuilder().getBuilder();
      }
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
        if (stateToAcceptBuilder_ != null) {
          return stateToAcceptBuilder_.getMessageOrBuilder();
        } else {
          return stateToAccept_;
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
          getStateToAcceptFieldBuilder() {
        if (stateToAcceptBuilder_ == null) {
          stateToAcceptBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
                  stateToAccept_,
                  getParentForChildren(),
                  isClean());
          stateToAccept_ = null;
        }
        return stateToAcceptBuilder_;
      }
      
      // required string fromURL = 3;
      private java.lang.Object fromURL_ = "";
      public boolean hasFromURL() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      public String getFromURL() {
        java.lang.Object ref = fromURL_;
        if (!(ref instanceof String)) {
          String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
          fromURL_ = s;
          return s;
        } else {
          return (String) ref;
        }
      }
      public Builder setFromURL(String value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000004;
        fromURL_ = value;
        onChanged();
        return this;
      }
      public Builder clearFromURL() {
        bitField0_ = (bitField0_ & ~0x00000004);
        fromURL_ = getDefaultInstance().getFromURL();
        onChanged();
        return this;
      }
      void setFromURL(com.google.protobuf.ByteString value) {
        bitField0_ |= 0x00000004;
        fromURL_ = value;
        onChanged();
      }
      
      // @@protoc_insertion_point(builder_scope:AcceptRecoveryRequestProto)
    }
    
    static {
      defaultInstance = new AcceptRecoveryRequestProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:AcceptRecoveryRequestProto)
  }
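  
  // Editor's usage sketch (not part of the generated file): assembling an
  // AcceptRecoveryRequestProto. All three fields are required, so build()
  // throws if any is missing. The reqInfo/state arguments and the URL are
  // hypothetical placeholders.
  private static AcceptRecoveryRequestProto exampleAcceptRecoveryRequest(
      RequestInfoProto reqInfo, SegmentStateProto state)
      throws com.google.protobuf.InvalidProtocolBufferException {
    AcceptRecoveryRequestProto req = AcceptRecoveryRequestProto.newBuilder()
        .setReqInfo(reqInfo)                       // required .RequestInfoProto
        .setStateToAccept(state)                   // required .SegmentStateProto
        .setFromURL("http://jn1:8480/getJournal")  // required string (hypothetical URL)
        .build();
    // Round-trip through bytes; parseFrom re-validates all required fields,
    // including those nested in reqInfo and stateToAccept.
    return AcceptRecoveryRequestProto.parseFrom(req.toByteString());
  }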
  
  public interface AcceptRecoveryResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
  public static final class AcceptRecoveryResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements AcceptRecoveryResponseProtoOrBuilder {
    // Use AcceptRecoveryResponseProto.newBuilder() to construct.
    private AcceptRecoveryResponseProto(Builder builder) {
      super(builder);
    }
    private AcceptRecoveryResponseProto(boolean noInit) {}
    
    private static final AcceptRecoveryResponseProto defaultInstance;
    public static AcceptRecoveryResponseProto getDefaultInstance() {
      return defaultInstance;
    }
    
    public AcceptRecoveryResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_AcceptRecoveryResponseProto_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_AcceptRecoveryResponseProto_fieldAccessorTable;
    }
    
    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      memoizedIsInitialized = 1;
      return true;
    }
    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }
    
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) obj;
      
      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    
    @java.lang.Override
    public int hashCode() {
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
    
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_AcceptRecoveryResponseProto_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_AcceptRecoveryResponseProto_fieldAccessorTable;
      }
      
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      public Builder clear() {
        super.clear();
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDescriptor();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto(this);
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        return true;
      }
      
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
          }
        }
      }
      
      
      // @@protoc_insertion_point(builder_scope:AcceptRecoveryResponseProto)
    }
    
    static {
      defaultInstance = new AcceptRecoveryResponseProto(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:AcceptRecoveryResponseProto)
  }
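  
  // Editor's note (not part of the generated file): AcceptRecoveryResponseProto
  // declares no fields, so callers can acknowledge with the shared default
  // instance instead of allocating a builder.
  private static AcceptRecoveryResponseProto emptyAcceptRecoveryResponse() {
    return AcceptRecoveryResponseProto.getDefaultInstance();
  }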
  
  public static abstract class QJournalProtocolService
      implements com.google.protobuf.Service {
    protected QJournalProtocolService() {}
    
    public interface Interface {
      public abstract void isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);
      
      public abstract void getJournalState(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);
      
      public abstract void newEpoch(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);
      
      public abstract void format(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);
      
      public abstract void journal(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);
      
      public abstract void heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);
      
      public abstract void startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);
      
      public abstract void finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);
      
      public abstract void purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);
      
      public abstract void getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);
      
      public abstract void prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);
      
      public abstract void acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);
      
    }
    
    public static com.google.protobuf.Service newReflectiveService(
        final Interface impl) {
      return new QJournalProtocolService() {
        @java.lang.Override
        public  void isFormatted(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
            com.google.protobuf.RpcCallback done) {
          impl.isFormatted(controller, request, done);
        }
        
        @java.lang.Override
        public  void getJournalState(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
            com.google.protobuf.RpcCallback done) {
          impl.getJournalState(controller, request, done);
        }
        
        @java.lang.Override
        public  void newEpoch(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
            com.google.protobuf.RpcCallback done) {
          impl.newEpoch(controller, request, done);
        }
        
        @java.lang.Override
        public  void format(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
            com.google.protobuf.RpcCallback done) {
          impl.format(controller, request, done);
        }
        
        @java.lang.Override
        public  void journal(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
            com.google.protobuf.RpcCallback done) {
          impl.journal(controller, request, done);
        }
        
        @java.lang.Override
        public  void heartbeat(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
            com.google.protobuf.RpcCallback done) {
          impl.heartbeat(controller, request, done);
        }
        
        @java.lang.Override
        public  void startLogSegment(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
            com.google.protobuf.RpcCallback done) {
          impl.startLogSegment(controller, request, done);
        }
        
        @java.lang.Override
        public  void finalizeLogSegment(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
            com.google.protobuf.RpcCallback done) {
          impl.finalizeLogSegment(controller, request, done);
        }
        
        @java.lang.Override
        public  void purgeLogs(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
            com.google.protobuf.RpcCallback done) {
          impl.purgeLogs(controller, request, done);
        }
        
        @java.lang.Override
        public  void getEditLogManifest(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
            com.google.protobuf.RpcCallback done) {
          impl.getEditLogManifest(controller, request, done);
        }
        
        @java.lang.Override
        public  void prepareRecovery(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
          impl.prepareRecovery(controller, request, done);
        }
        
        @java.lang.Override
        public  void acceptRecovery(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
          impl.acceptRecovery(controller, request, done);
        }
        
      };
    }
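    
    // Usage sketch (editorial note, not emitted by protoc): an asynchronous
    // Interface implementation can be adapted into a generic
    // com.google.protobuf.Service, which an RPC server then drives purely by
    // MethodDescriptor. `myAsyncImpl` is a hypothetical implementation.
    //
    //   com.google.protobuf.Service service =
    //       QJournalProtocolService.newReflectiveService(myAsyncImpl);
    //   // the server routes each of the twelve RPCs through
    //   // service.callMethod(method, controller, request, callback)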
    
    public static com.google.protobuf.BlockingService
        newReflectiveBlockingService(final BlockingInterface impl) {
      return new com.google.protobuf.BlockingService() {
        public final com.google.protobuf.Descriptors.ServiceDescriptor
            getDescriptorForType() {
          return getDescriptor();
        }
        
        public final com.google.protobuf.Message callBlockingMethod(
            com.google.protobuf.Descriptors.MethodDescriptor method,
            com.google.protobuf.RpcController controller,
            com.google.protobuf.Message request)
            throws com.google.protobuf.ServiceException {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.callBlockingMethod() given method descriptor for " +
              "wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return impl.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request);
            case 1:
              return impl.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request);
            case 2:
              return impl.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request);
            case 3:
              return impl.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request);
            case 4:
              return impl.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request);
            case 5:
              return impl.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request);
            case 6:
              return impl.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request);
            case 7:
              return impl.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request);
            case 8:
              return impl.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request);
            case 9:
              return impl.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request);
            case 10:
              return impl.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request);
            case 11:
              return impl.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request);
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }
        
        public final com.google.protobuf.Message
            getRequestPrototype(
            com.google.protobuf.Descriptors.MethodDescriptor method) {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.getRequestPrototype() given method " +
              "descriptor for wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
            case 1:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
            case 2:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
            case 3:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
            case 4:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
            case 5:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
            case 6:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
            case 7:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
            case 8:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
            case 9:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
            case 10:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
            case 11:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }
        
        public final com.google.protobuf.Message
            getResponsePrototype(
            com.google.protobuf.Descriptors.MethodDescriptor method) {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.getResponsePrototype() given method " +
              "descriptor for wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
            case 1:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
            case 2:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
            case 3:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
            case 4:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
            case 5:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
            case 6:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
            case 7:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
            case 8:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
            case 9:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
            case 10:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
            case 11:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }
        
      };
    }
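    
    // Usage sketch (editorial note): wrapping a blocking implementation so a
    // protobuf RPC server can dispatch to it by descriptor. `myBlockingImpl`
    // is a hypothetical BlockingInterface implementation.
    //
    //   com.google.protobuf.BlockingService blocking =
    //       QJournalProtocolService.newReflectiveBlockingService(myBlockingImpl);
    //   com.google.protobuf.Descriptors.MethodDescriptor m =
    //       blocking.getDescriptorForType().findMethodByName("isFormatted");
    //   com.google.protobuf.Message reply =
    //       blocking.callBlockingMethod(m, null, request);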
    
    public abstract void isFormatted(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);
    
    public abstract void getJournalState(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);
    
    public abstract void newEpoch(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);
    
    public abstract void format(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);
    
    public abstract void journal(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);
    
    public abstract void heartbeat(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);
    
    public abstract void startLogSegment(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);
    
    public abstract void finalizeLogSegment(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);
    
    public abstract void purgeLogs(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);
    
    public abstract void getEditLogManifest(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);
    
    public abstract void prepareRecovery(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);
    
    public abstract void acceptRecovery(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);
    
    public static final
        com.google.protobuf.Descriptors.ServiceDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.getDescriptor().getServices().get(0);
    }
    public final com.google.protobuf.Descriptors.ServiceDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    
    public final void callMethod(
        com.google.protobuf.Descriptors.MethodDescriptor method,
        com.google.protobuf.RpcController controller,
        com.google.protobuf.Message request,
        com.google.protobuf.RpcCallback<
          com.google.protobuf.Message> done) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.callMethod() given method descriptor for wrong " +
          "service type.");
      }
      switch(method.getIndex()) {
        case 0:
          this.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto>specializeCallback(
              done));
          return;
        case 1:
          this.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto>specializeCallback(
              done));
          return;
        case 2:
          this.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto>specializeCallback(
              done));
          return;
        case 3:
          this.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto>specializeCallback(
              done));
          return;
        case 4:
          this.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto>specializeCallback(
              done));
          return;
        case 5:
          this.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto>specializeCallback(
              done));
          return;
        case 6:
          this.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto>specializeCallback(
              done));
          return;
        case 7:
          this.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto>specializeCallback(
              done));
          return;
        case 8:
          this.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto>specializeCallback(
              done));
          return;
        case 9:
          this.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto>specializeCallback(
              done));
          return;
        case 10:
          this.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto>specializeCallback(
              done));
          return;
        case 11:
          this.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto>specializeCallback(
              done));
          return;
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }
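    
    // Note (editorial): callMethod() narrows the generic
    // RpcCallback<com.google.protobuf.Message> to the concrete response type
    // with RpcUtil.specializeCallback() and forwards to the abstract method
    // selected by the descriptor index. A caller-side sketch (index 5 is
    // heartbeat in this service):
    //
    //   service.callMethod(
    //       QJournalProtocolService.getDescriptor().getMethods().get(5),
    //       controller, heartbeatRequest,
    //       new com.google.protobuf.RpcCallback<com.google.protobuf.Message>() {
    //         public void run(com.google.protobuf.Message response) { /* ... */ }
    //       });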
    
    public final com.google.protobuf.Message
        getRequestPrototype(
        com.google.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getRequestPrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
        case 1:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
        case 2:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
        case 3:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
        case 4:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
        case 5:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
        case 6:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
        case 7:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
        case 8:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
        case 9:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
        case 10:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
        case 11:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }
    
    public final com.google.protobuf.Message
        getResponsePrototype(
        com.google.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getResponsePrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
        case 1:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
        case 2:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
        case 3:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
        case 4:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
        case 5:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
        case 6:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
        case 7:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
        case 8:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
        case 9:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
        case 10:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
        case 11:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }
    
    public static Stub newStub(
        com.google.protobuf.RpcChannel channel) {
      return new Stub(channel);
    }
    
    public static final class Stub extends org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.QJournalProtocolService implements Interface {
      private Stub(com.google.protobuf.RpcChannel channel) {
        this.channel = channel;
      }
      
      private final com.google.protobuf.RpcChannel channel;
      
      public com.google.protobuf.RpcChannel getChannel() {
        return channel;
      }
      
      public  void isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()));
      }
      
      public  void getJournalState(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(1),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()));
      }
      
      public  void newEpoch(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(2),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()));
      }
      
      public  void format(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(3),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()));
      }
      
      public  void journal(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(4),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()));
      }
      
      public  void heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(5),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()));
      }
      
      public  void startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(6),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()));
      }
      
      public  void finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(7),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()));
      }
      
      public  void purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(8),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()));
      }
      
      public  void getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(9),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()));
      }
      
      public  void prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(10),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()));
      }
      
      public  void acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(11),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()));
      }
    }
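    
    // Usage sketch (editorial note): a non-blocking client call through a
    // Stub. The RpcChannel is assumed to be supplied by the surrounding RPC
    // framework; the callback runs when the response arrives (or with null
    // on failure, per the RpcChannel contract).
    //
    //   QJournalProtocolService.Stub stub =
    //       QJournalProtocolService.newStub(channel);
    //   stub.isFormatted(controller,
    //       IsFormattedRequestProto.newBuilder()
    //           .setJid(JournalIdProto.newBuilder().setIdentifier("myjournal"))
    //           .build(),
    //       new com.google.protobuf.RpcCallback<IsFormattedResponseProto>() {
    //         public void run(IsFormattedResponseProto response) {
    //           // response.getIsFormatted() once the call completes
    //         }
    //       });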
    
    public static BlockingInterface newBlockingStub(
        com.google.protobuf.BlockingRpcChannel channel) {
      return new BlockingStub(channel);
    }
    
    public interface BlockingInterface {
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
          throws com.google.protobuf.ServiceException;
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
          throws com.google.protobuf.ServiceException;
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
          throws com.google.protobuf.ServiceException;
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
          throws com.google.protobuf.ServiceException;
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
          throws com.google.protobuf.ServiceException;
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
          throws com.google.protobuf.ServiceException;
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException;
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException;
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
          throws com.google.protobuf.ServiceException;
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
          throws com.google.protobuf.ServiceException;
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException;
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException;
    }
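    
    // Sketch (editorial note): a minimal server-side BlockingInterface
    // implementation answers each RPC synchronously; the journalExists()
    // lookup below is a hypothetical placeholder.
    //
    //   public IsFormattedResponseProto isFormatted(
    //       com.google.protobuf.RpcController controller,
    //       IsFormattedRequestProto request)
    //       throws com.google.protobuf.ServiceException {
    //     boolean formatted = journalExists(request.getJid().getIdentifier());
    //     return IsFormattedResponseProto.newBuilder()
    //         .setIsFormatted(formatted)
    //         .build();
    //   }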
    
    private static final class BlockingStub implements BlockingInterface {
      private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
        this.channel = channel;
      }
      
      private final com.google.protobuf.BlockingRpcChannel channel;
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance());
      }
      
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(1),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance());
      }
      
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(2),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance());
      }
      
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(3),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance());
      }
      
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(4),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance());
      }
      
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(5),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance());
      }
      
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(6),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance());
      }
      
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(7),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance());
      }
      
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(8),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance());
      }
      
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(9),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance());
      }
      
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(10),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance());
      }
      
      
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(11),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance());
      }
      
    }
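    
    // Usage sketch (editorial note): a blocking client call. The
    // BlockingRpcChannel is assumed to come from the RPC layer (in Hadoop,
    // the ProtobufRpcEngine fills this role).
    //
    //   QJournalProtocolService.BlockingInterface proxy =
    //       QJournalProtocolService.newBlockingStub(blockingChannel);
    //   GetJournalStateResponseProto state = proxy.getJournalState(null,
    //       GetJournalStateRequestProto.newBuilder()
    //           .setJid(JournalIdProto.newBuilder().setIdentifier("myjournal"))
    //           .build());
    //   long lastPromised = state.getLastPromisedEpoch();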
  }
  
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_JournalIdProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_JournalIdProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_RequestInfoProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_RequestInfoProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_SegmentStateProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_SegmentStateProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_PersistedRecoveryPaxosData_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_PersistedRecoveryPaxosData_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_JournalRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_JournalRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_JournalResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_JournalResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_HeartbeatRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_HeartbeatRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_HeartbeatResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_HeartbeatResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_StartLogSegmentRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_StartLogSegmentRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_StartLogSegmentResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_StartLogSegmentResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_FinalizeLogSegmentRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_FinalizeLogSegmentRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_FinalizeLogSegmentResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_FinalizeLogSegmentResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_PurgeLogsRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_PurgeLogsRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_PurgeLogsResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_PurgeLogsResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_IsFormattedRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_IsFormattedRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_IsFormattedResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_IsFormattedResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_GetJournalStateRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_GetJournalStateRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_GetJournalStateResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_GetJournalStateResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_FormatRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_FormatRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_FormatResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_FormatResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_NewEpochRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_NewEpochRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_NewEpochResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_NewEpochResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_GetEditLogManifestRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_GetEditLogManifestRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_GetEditLogManifestResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_GetEditLogManifestResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_PrepareRecoveryRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_PrepareRecoveryRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_PrepareRecoveryResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_PrepareRecoveryResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_AcceptRecoveryRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_AcceptRecoveryRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_AcceptRecoveryResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_AcceptRecoveryResponseProto_fieldAccessorTable;
  
  public static com.google.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
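  
  // Sketch (editorial note): the file descriptor can be inspected
  // reflectively, e.g. to enumerate this service's twelve RPC methods:
  //
  //   for (com.google.protobuf.Descriptors.MethodDescriptor m :
  //       QJournalProtocolProtos.getDescriptor()
  //           .getServices().get(0).getMethods()) {
  //     System.out.println(m.getName() + " : " + m.getInputType().getName()
  //         + " -> " + m.getOutputType().getName());
  //   }
  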
  private static com.google.protobuf.Descriptors.FileDescriptor
      descriptor;
  static {
    java.lang.String[] descriptorData = {
      "\n\026QJournalProtocol.proto\032\nhdfs.proto\"$\n\016" +
      "JournalIdProto\022\022\n\nidentifier\030\001 \002(\t\"u\n\020Re" +
      "questInfoProto\022\"\n\tjournalId\030\001 \002(\0132\017.Jour" +
      "nalIdProto\022\r\n\005epoch\030\002 \002(\004\022\027\n\017ipcSerialNu" +
      "mber\030\003 \002(\004\022\025\n\rcommittedTxId\030\004 \001(\004\"M\n\021Seg" +
      "mentStateProto\022\021\n\tstartTxId\030\001 \002(\004\022\017\n\007end" +
      "TxId\030\002 \002(\004\022\024\n\014isInProgress\030\003 \002(\010\"_\n\032Pers" +
      "istedRecoveryPaxosData\022(\n\014segmentState\030\001" +
      " \002(\0132\022.SegmentStateProto\022\027\n\017acceptedInEp" +
      "och\030\002 \002(\004\"\205\001\n\023JournalRequestProto\022\"\n\007req",
      "Info\030\001 \002(\0132\021.RequestInfoProto\022\022\n\nfirstTx" +
      "nId\030\002 \002(\004\022\017\n\007numTxns\030\003 \002(\r\022\017\n\007records\030\004 " +
      "\002(\014\022\024\n\014segmentTxnId\030\005 \002(\004\"\026\n\024JournalResp" +
      "onseProto\";\n\025HeartbeatRequestProto\022\"\n\007re" +
      "qInfo\030\001 \002(\0132\021.RequestInfoProto\"\030\n\026Heartb" +
      "eatResponseProto\"O\n\033StartLogSegmentReque" +
      "stProto\022\"\n\007reqInfo\030\001 \002(\0132\021.RequestInfoPr" +
      "oto\022\014\n\004txid\030\002 \002(\004\"\036\n\034StartLogSegmentResp" +
      "onseProto\"h\n\036FinalizeLogSegmentRequestPr" +
      "oto\022\"\n\007reqInfo\030\001 \002(\0132\021.RequestInfoProto\022",
      "\021\n\tstartTxId\030\002 \002(\004\022\017\n\007endTxId\030\003 \002(\004\"!\n\037F" +
      "inalizeLogSegmentResponseProto\"R\n\025PurgeL" +
      "ogsRequestProto\022\"\n\007reqInfo\030\001 \002(\0132\021.Reque" +
      "stInfoProto\022\025\n\rminTxIdToKeep\030\002 \002(\004\"\030\n\026Pu" +
      "rgeLogsResponseProto\"7\n\027IsFormattedReque" +
      "stProto\022\034\n\003jid\030\001 \002(\0132\017.JournalIdProto\"/\n" +
      "\030IsFormattedResponseProto\022\023\n\013isFormatted" +
      "\030\001 \002(\010\";\n\033GetJournalStateRequestProto\022\034\n" +
      "\003jid\030\001 \002(\0132\017.JournalIdProto\"K\n\034GetJourna" +
      "lStateResponseProto\022\031\n\021lastPromisedEpoch",
      "\030\001 \002(\004\022\020\n\010httpPort\030\002 \002(\r\"W\n\022FormatReques" +
      "tProto\022\034\n\003jid\030\001 \002(\0132\017.JournalIdProto\022#\n\006" +
      "nsInfo\030\002 \002(\0132\023.NamespaceInfoProto\"\025\n\023For" +
      "matResponseProto\"h\n\024NewEpochRequestProto" +
      "\022\034\n\003jid\030\001 \002(\0132\017.JournalIdProto\022#\n\006nsInfo" +
      "\030\002 \002(\0132\023.NamespaceInfoProto\022\r\n\005epoch\030\003 \002" +
      "(\004\"0\n\025NewEpochResponseProto\022\027\n\017lastSegme" +
      "ntTxId\030\001 \001(\004\"n\n\036GetEditLogManifestReques" +
      "tProto\022\034\n\003jid\030\001 \002(\0132\017.JournalIdProto\022\021\n\t" +
      "sinceTxId\030\002 \002(\004\022\033\n\014inProgressOk\030\004 \001(\010:\005f",
      "alse\"b\n\037GetEditLogManifestResponseProto\022" +
      "-\n\010manifest\030\001 \002(\0132\033.RemoteEditLogManifes" +
      "tProto\022\020\n\010httpPort\030\002 \002(\r\"V\n\033PrepareRecov" +
      "eryRequestProto\022\"\n\007reqInfo\030\001 \002(\0132\021.Reque" +
      "stInfoProto\022\023\n\013segmentTxId\030\002 \002(\004\"\225\001\n\034Pre" +
      "pareRecoveryResponseProto\022(\n\014segmentStat" +
      "e\030\001 \001(\0132\022.SegmentStateProto\022\027\n\017acceptedI" +
      "nEpoch\030\002 \001(\004\022\027\n\017lastWriterEpoch\030\003 \002(\004\022\031\n" +
      "\021lastCommittedTxId\030\004 \001(\004\"|\n\032AcceptRecove" +
      "ryRequestProto\022\"\n\007reqInfo\030\001 \002(\0132\021.Reques",
      "tInfoProto\022)\n\rstateToAccept\030\002 \002(\0132\022.Segm" +
      "entStateProto\022\017\n\007fromURL\030\003 \002(\t\"\035\n\033Accept" +
      "RecoveryResponseProto2\360\006\n\027QJournalProtoc" +
      "olService\022B\n\013isFormatted\022\030.IsFormattedRe" +
      "questProto\032\031.IsFormattedResponseProto\022N\n" +
      "\017getJournalState\022\034.GetJournalStateReques" +
      "tProto\032\035.GetJournalStateResponseProto\0229\n" +
      "\010newEpoch\022\025.NewEpochRequestProto\032\026.NewEp" +
      "ochResponseProto\0223\n\006format\022\023.FormatReque" +
      "stProto\032\024.FormatResponseProto\0226\n\007journal",
      "\022\024.JournalRequestProto\032\025.JournalResponse" +
      "Proto\022<\n\theartbeat\022\026.HeartbeatRequestPro" +
      "to\032\027.HeartbeatResponseProto\022N\n\017startLogS" +
      "egment\022\034.StartLogSegmentRequestProto\032\035.S" +
      "tartLogSegmentResponseProto\022W\n\022finalizeL" +
      "ogSegment\022\037.FinalizeLogSegmentRequestPro" +
      "to\032 .FinalizeLogSegmentResponseProto\022<\n\t" +
      "purgeLogs\022\026.PurgeLogsRequestProto\032\027.Purg" +
      "eLogsResponseProto\022W\n\022getEditLogManifest" +
      "\022\037.GetEditLogManifestRequestProto\032 .GetE",
      "ditLogManifestResponseProto\022N\n\017prepareRe" +
      "covery\022\034.PrepareRecoveryRequestProto\032\035.P" +
      "repareRecoveryResponseProto\022K\n\016acceptRec" +
      "overy\022\033.AcceptRecoveryRequestProto\032\034.Acc" +
      "eptRecoveryResponseProtoBH\n(org.apache.h" +
      "adoop.hdfs.qjournal.protocolB\026QJournalPr" +
      "otocolProtos\210\001\001\240\001\001"
    };
    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
        public com.google.protobuf.ExtensionRegistry assignDescriptors(
            com.google.protobuf.Descriptors.FileDescriptor root) {
          descriptor = root;
          internal_static_JournalIdProto_descriptor =
            getDescriptor().getMessageTypes().get(0);
          internal_static_JournalIdProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_JournalIdProto_descriptor,
              new java.lang.String[] { "Identifier", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
          internal_static_RequestInfoProto_descriptor =
            getDescriptor().getMessageTypes().get(1);
          internal_static_RequestInfoProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_RequestInfoProto_descriptor,
              new java.lang.String[] { "JournalId", "Epoch", "IpcSerialNumber", "CommittedTxId", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
          internal_static_SegmentStateProto_descriptor =
            getDescriptor().getMessageTypes().get(2);
          internal_static_SegmentStateProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_SegmentStateProto_descriptor,
              new java.lang.String[] { "StartTxId", "EndTxId", "IsInProgress", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class);
          internal_static_PersistedRecoveryPaxosData_descriptor =
            getDescriptor().getMessageTypes().get(3);
          internal_static_PersistedRecoveryPaxosData_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_PersistedRecoveryPaxosData_descriptor,
              new java.lang.String[] { "SegmentState", "AcceptedInEpoch", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class);
          internal_static_JournalRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(4);
          internal_static_JournalRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_JournalRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "FirstTxnId", "NumTxns", "Records", "SegmentTxnId", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class);
          internal_static_JournalResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(5);
          internal_static_JournalResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_JournalResponseProto_descriptor,
              new java.lang.String[] { },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class);
          internal_static_HeartbeatRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(6);
          internal_static_HeartbeatRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_HeartbeatRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class);
          internal_static_HeartbeatResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(7);
          internal_static_HeartbeatResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_HeartbeatResponseProto_descriptor,
              new java.lang.String[] { },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class);
          internal_static_StartLogSegmentRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(8);
          internal_static_StartLogSegmentRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_StartLogSegmentRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "Txid", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
          internal_static_StartLogSegmentResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(9);
          internal_static_StartLogSegmentResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_StartLogSegmentResponseProto_descriptor,
              new java.lang.String[] { },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
          internal_static_FinalizeLogSegmentRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(10);
          internal_static_FinalizeLogSegmentRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_FinalizeLogSegmentRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "StartTxId", "EndTxId", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class);
          internal_static_FinalizeLogSegmentResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(11);
          internal_static_FinalizeLogSegmentResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_FinalizeLogSegmentResponseProto_descriptor,
              new java.lang.String[] { },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class);
          internal_static_PurgeLogsRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(12);
          internal_static_PurgeLogsRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_PurgeLogsRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "MinTxIdToKeep", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class);
          internal_static_PurgeLogsResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(13);
          internal_static_PurgeLogsResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_PurgeLogsResponseProto_descriptor,
              new java.lang.String[] { },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class);
          internal_static_IsFormattedRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(14);
          internal_static_IsFormattedRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_IsFormattedRequestProto_descriptor,
              new java.lang.String[] { "Jid", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class);
          internal_static_IsFormattedResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(15);
          internal_static_IsFormattedResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_IsFormattedResponseProto_descriptor,
              new java.lang.String[] { "IsFormatted", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class);
          internal_static_GetJournalStateRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(16);
          internal_static_GetJournalStateRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_GetJournalStateRequestProto_descriptor,
              new java.lang.String[] { "Jid", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class);
          internal_static_GetJournalStateResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(17);
          internal_static_GetJournalStateResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_GetJournalStateResponseProto_descriptor,
              new java.lang.String[] { "LastPromisedEpoch", "HttpPort", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class);
          internal_static_FormatRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(18);
          internal_static_FormatRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_FormatRequestProto_descriptor,
              new java.lang.String[] { "Jid", "NsInfo", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class);
          internal_static_FormatResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(19);
          internal_static_FormatResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_FormatResponseProto_descriptor,
              new java.lang.String[] { },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class);
          internal_static_NewEpochRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(20);
          internal_static_NewEpochRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_NewEpochRequestProto_descriptor,
              new java.lang.String[] { "Jid", "NsInfo", "Epoch", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class);
          internal_static_NewEpochResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(21);
          internal_static_NewEpochResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_NewEpochResponseProto_descriptor,
              new java.lang.String[] { "LastSegmentTxId", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class);
          internal_static_GetEditLogManifestRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(22);
          internal_static_GetEditLogManifestRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_GetEditLogManifestRequestProto_descriptor,
              new java.lang.String[] { "Jid", "SinceTxId", "InProgressOk", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
          internal_static_GetEditLogManifestResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(23);
          internal_static_GetEditLogManifestResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_GetEditLogManifestResponseProto_descriptor,
              new java.lang.String[] { "Manifest", "HttpPort", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class);
          internal_static_PrepareRecoveryRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(24);
          internal_static_PrepareRecoveryRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_PrepareRecoveryRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "SegmentTxId", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class);
          internal_static_PrepareRecoveryResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(25);
          internal_static_PrepareRecoveryResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_PrepareRecoveryResponseProto_descriptor,
              new java.lang.String[] { "SegmentState", "AcceptedInEpoch", "LastWriterEpoch", "LastCommittedTxId", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class);
          internal_static_AcceptRecoveryRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(26);
          internal_static_AcceptRecoveryRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_AcceptRecoveryRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "StateToAccept", "FromURL", },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class);
          internal_static_AcceptRecoveryResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(27);
          internal_static_AcceptRecoveryResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_AcceptRecoveryResponseProto_descriptor,
              new java.lang.String[] { },
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class,
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class);
          return null;
        }
      };
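    // Parse the embedded descriptor, resolving cross-file type references
    // (e.g. NamespaceInfoProto) against the imported HDFS proto file, whose
    // generated descriptor is passed as the single dependency; the assigner
    // then runs to populate the static descriptor and accessor-table fields.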
    com.google.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new com.google.protobuf.Descriptors.FileDescriptor[] {
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
        }, assigner);
  }
  
  // @@protoc_insertion_point(outer_class_scope)
}
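
// Illustrative usage sketch (not part of the generated file): building,
// serializing, and re-parsing one of the messages wired up above, using the
// standard protobuf 2.x generated builder API. The class name
// JournalIdProtoExample is hypothetical and added only for illustration.
class JournalIdProtoExample {
  public static void main(String[] args) throws Exception {
    QJournalProtocolProtos.JournalIdProto jid =
        QJournalProtocolProtos.JournalIdProto.newBuilder()
            .setIdentifier("edits-journal") // required field; build() throws if unset
            .build();
    byte[] wire = jid.toByteArray();        // serialize to the protobuf wire format
    QJournalProtocolProtos.JournalIdProto parsed =
        QJournalProtocolProtos.JournalIdProto.parseFrom(wire);
    System.out.println(parsed.getIdentifier()); // prints "edits-journal"
  }
}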