// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: QJournalProtocol.proto

package org.apache.hadoop.hdfs.qjournal.protocol;

public final class QJournalProtocolProtos {
  private QJournalProtocolProtos() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public interface JournalIdProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required string identifier = 1;
    /**
     * required string identifier = 1;
     */
    boolean hasIdentifier();
    /**
     * required string identifier = 1;
     */
    java.lang.String getIdentifier();
    /**
     * required string identifier = 1;
     */
    com.google.protobuf.ByteString
        getIdentifierBytes();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.qjournal.JournalIdProto}
   */
  public static final class JournalIdProto extends
      com.google.protobuf.GeneratedMessage
      implements JournalIdProtoOrBuilder {
    // Use JournalIdProto.newBuilder() to construct.
    private JournalIdProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private JournalIdProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final JournalIdProto defaultInstance;
    public static JournalIdProto getDefaultInstance() {
      return defaultInstance;
    }

    public JournalIdProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private JournalIdProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              bitField0_ |= 0x00000001;
              identifier_ = input.readBytes();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalIdProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalIdProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
    }

    public static com.google.protobuf.Parser<JournalIdProto> PARSER =
        new com.google.protobuf.AbstractParser<JournalIdProto>() {
      public JournalIdProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new JournalIdProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<JournalIdProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required string identifier = 1;
    public static final int IDENTIFIER_FIELD_NUMBER = 1;
    private java.lang.Object identifier_;
    /**
     * required string identifier = 1;
     */
    public boolean hasIdentifier() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * required string identifier = 1;
     */
    public java.lang.String getIdentifier() {
      java.lang.Object ref = identifier_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        com.google.protobuf.ByteString bs = 
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          identifier_ = s;
        }
        return s;
      }
    }
    /**
     * required string identifier = 1;
     */
    public com.google.protobuf.ByteString
        getIdentifierBytes() {
      java.lang.Object ref = identifier_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b = 
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        identifier_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }

    private void initFields() {
      identifier_ = "";
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasIdentifier()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeBytes(1, getIdentifierBytes());
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(1, getIdentifierBytes());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) obj;

      boolean result = true;
      result = result && (hasIdentifier() == other.hasIdentifier());
      if (hasIdentifier()) {
        result = result && getIdentifier()
            .equals(other.getIdentifier());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasIdentifier()) {
        hash = (37 * hash) + IDENTIFIER_FIELD_NUMBER;
        hash = (53 * hash) + getIdentifier().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.qjournal.JournalIdProto}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalIdProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalIdProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        identifier_ = "";
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalIdProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.identifier_ = identifier_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) return this;
        if (other.hasIdentifier()) {
          bitField0_ |= 0x00000001;
          identifier_ = other.identifier_;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasIdentifier()) {
          
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required string identifier = 1;
      private java.lang.Object identifier_ = "";
      /**
       * required string identifier = 1;
       */
      public boolean hasIdentifier() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * required string identifier = 1;
       */
      public java.lang.String getIdentifier() {
        java.lang.Object ref = identifier_;
        if (!(ref instanceof java.lang.String)) {
          java.lang.String s = ((com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          identifier_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string identifier = 1;
       */
      public com.google.protobuf.ByteString
          getIdentifierBytes() {
        java.lang.Object ref = identifier_;
        if (ref instanceof String) {
          com.google.protobuf.ByteString b = 
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          identifier_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
      /**
       * required string identifier = 1;
       */
      public Builder setIdentifier(
          java.lang.String value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        identifier_ = value;
        onChanged();
        return this;
      }
      /**
       * required string identifier = 1;
       */
      public Builder clearIdentifier() {
        bitField0_ = (bitField0_ & ~0x00000001);
        identifier_ = getDefaultInstance().getIdentifier();
        onChanged();
        return this;
      }
      /**
       * required string identifier = 1;
       */
      public Builder setIdentifierBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        identifier_ = value;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.JournalIdProto)
    }

    static {
      defaultInstance = new JournalIdProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.JournalIdProto)
  }
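
  // Illustrative usage (not part of the generated code; the journal name
  // "myjournal" is a made-up example value): a JournalIdProto is built via
  // its Builder and round-trips through the standard protobuf wire format.
  //
  //   JournalIdProto jid = JournalIdProto.newBuilder()
  //       .setIdentifier("myjournal")
  //       .build();
  //   byte[] bytes = jid.toByteArray();
  //   JournalIdProto parsed = JournalIdProto.parseFrom(bytes);
  //   assert parsed.getIdentifier().equals("myjournal");
  //
  // Because "identifier" is a required field, build() throws if
  // setIdentifier() was never called, while buildPartial() skips that check.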

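  // Illustrative usage (not part of the generated code; all values below are
  // made-up examples): a writer attaches a RequestInfoProto to its requests,
  // identifying the journal, the writer's epoch, and an IPC serial number.
  // The optional committedTxId may run ahead of the edits carried in the
  // request itself when the receiving node has fallen behind.
  //
  //   RequestInfoProto reqInfo = RequestInfoProto.newBuilder()
  //       .setJournalId(JournalIdProto.newBuilder().setIdentifier("myjournal"))
  //       .setEpoch(5L)
  //       .setIpcSerialNumber(17L)
  //       .setCommittedTxId(1005L)  // highest txid the writer knows committed
  //       .build();
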
  public interface RequestInfoProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
    /**
     * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
     */
    boolean hasJournalId();
    /**
     * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId();
    /**
     * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder();

    // required uint64 epoch = 2;
    /**
     * required uint64 epoch = 2;
     */
    boolean hasEpoch();
    /**
     * required uint64 epoch = 2;
     */
    long getEpoch();

    // required uint64 ipcSerialNumber = 3;
    /**
     * required uint64 ipcSerialNumber = 3;
     */
    boolean hasIpcSerialNumber();
    /**
     * required uint64 ipcSerialNumber = 3;
     */
    long getIpcSerialNumber();

    // optional uint64 committedTxId = 4;
    /**
     * optional uint64 committedTxId = 4;
     *
     * 
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * 
     */
    boolean hasCommittedTxId();
    /**
     * optional uint64 committedTxId = 4;
     *
     *
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * 
     */
    long getCommittedTxId();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.qjournal.RequestInfoProto}
   */
  public static final class RequestInfoProto extends
      com.google.protobuf.GeneratedMessage
      implements RequestInfoProtoOrBuilder {
    // Use RequestInfoProto.newBuilder() to construct.
    private RequestInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private RequestInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final RequestInfoProto defaultInstance;
    public static RequestInfoProto getDefaultInstance() {
      return defaultInstance;
    }

    public RequestInfoProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private RequestInfoProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = journalId_.toBuilder();
              }
              journalId_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(journalId_);
                journalId_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              epoch_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              ipcSerialNumber_ = input.readUInt64();
              break;
            }
            case 32: {
              bitField0_ |= 0x00000008;
              committedTxId_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_RequestInfoProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_RequestInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
    }

    public static com.google.protobuf.Parser<RequestInfoProto> PARSER =
        new com.google.protobuf.AbstractParser<RequestInfoProto>() {
      public RequestInfoProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new RequestInfoProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<RequestInfoProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
    public static final int JOURNALID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_;
    /**
     * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
     */
    public boolean hasJournalId() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
      return journalId_;
    }
    /**
     * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
      return journalId_;
    }

    // required uint64 epoch = 2;
    public static final int EPOCH_FIELD_NUMBER = 2;
    private long epoch_;
    /**
     * required uint64 epoch = 2;
     */
    public boolean hasEpoch() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * required uint64 epoch = 2;
     */
    public long getEpoch() {
      return epoch_;
    }

    // required uint64 ipcSerialNumber = 3;
    public static final int IPCSERIALNUMBER_FIELD_NUMBER = 3;
    private long ipcSerialNumber_;
    /**
     * required uint64 ipcSerialNumber = 3;
     */
    public boolean hasIpcSerialNumber() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * required uint64 ipcSerialNumber = 3;
     */
    public long getIpcSerialNumber() {
      return ipcSerialNumber_;
    }

    // optional uint64 committedTxId = 4;
    public static final int COMMITTEDTXID_FIELD_NUMBER = 4;
    private long committedTxId_;
    /**
     * optional uint64 committedTxId = 4;
     *
     *
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * 
     */
    public boolean hasCommittedTxId() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    /**
     * optional uint64 committedTxId = 4;
     *
     *
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * 
     */
    public long getCommittedTxId() {
      return committedTxId_;
    }

    private void initFields() {
      journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      epoch_ = 0L;
      ipcSerialNumber_ = 0L;
      committedTxId_ = 0L;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasJournalId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIpcSerialNumber()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJournalId().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, journalId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, epoch_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, ipcSerialNumber_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeUInt64(4, committedTxId_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, journalId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, epoch_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(3, ipcSerialNumber_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(4, committedTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) obj;

      boolean result = true;
      result = result && (hasJournalId() == other.hasJournalId());
      if (hasJournalId()) {
        result = result && getJournalId()
            .equals(other.getJournalId());
      }
      result = result && (hasEpoch() == other.hasEpoch());
      if (hasEpoch()) {
        result = result && (getEpoch()
            == other.getEpoch());
      }
      result = result && (hasIpcSerialNumber() == other.hasIpcSerialNumber());
      if (hasIpcSerialNumber()) {
        result = result && (getIpcSerialNumber()
            == other.getIpcSerialNumber());
      }
      result = result && (hasCommittedTxId() == other.hasCommittedTxId());
      if (hasCommittedTxId()) {
        result = result && (getCommittedTxId()
            == other.getCommittedTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJournalId()) {
        hash = (37 * hash) + JOURNALID_FIELD_NUMBER;
        hash = (53 * hash) + getJournalId().hashCode();
      }
      if (hasEpoch()) {
        hash = (37 * hash) + EPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getEpoch());
      }
      if (hasIpcSerialNumber()) {
        hash = (37 * hash) + IPCSERIALNUMBER_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getIpcSerialNumber());
      }
      if (hasCommittedTxId()) {
        hash = (37 * hash) + COMMITTEDTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getCommittedTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.qjournal.RequestInfoProto}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_RequestInfoProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_RequestInfoProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJournalIdFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        if (journalIdBuilder_ == null) {
          journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          journalIdBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        epoch_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        ipcSerialNumber_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000004);
        committedTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000008);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_RequestInfoProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (journalIdBuilder_ == null) {
          result.journalId_ = journalId_;
        } else {
          result.journalId_ = journalIdBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.epoch_ = epoch_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.ipcSerialNumber_ = ipcSerialNumber_;
        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
          to_bitField0_ |= 0x00000008;
        }
        result.committedTxId_ = committedTxId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) return this;
        if (other.hasJournalId()) {
          mergeJournalId(other.getJournalId());
        }
        if (other.hasEpoch()) {
          setEpoch(other.getEpoch());
        }
        if (other.hasIpcSerialNumber()) {
          setIpcSerialNumber(other.getIpcSerialNumber());
        }
        if (other.hasCommittedTxId()) {
          setCommittedTxId(other.getCommittedTxId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasJournalId()) {
          return false;
        }
        if (!hasEpoch()) {
          return false;
        }
        if (!hasIpcSerialNumber()) {
          return false;
        }
        if (!getJournalId().isInitialized()) {
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> journalIdBuilder_;
      /**
       * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
       */
      public boolean hasJournalId() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
        if (journalIdBuilder_ == null) {
          return journalId_;
        } else {
          return journalIdBuilder_.getMessage();
        }
      }
      /**
       * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
       */
      public Builder setJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (journalIdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          journalId_ = value;
          onChanged();
        } else {
          journalIdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
       */
      public Builder setJournalId(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
        if (journalIdBuilder_ == null) {
          journalId_ = builderForValue.build();
          onChanged();
        } else {
          journalIdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
       */
      public Builder mergeJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (journalIdBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              journalId_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
            journalId_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(journalId_).mergeFrom(value).buildPartial();
          } else {
            journalId_ = value;
          }
          onChanged();
        } else {
          journalIdBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
       */
      public Builder clearJournalId() {
        if (journalIdBuilder_ == null) {
          journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
          onChanged();
        } else {
          journalIdBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJournalIdBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getJournalIdFieldBuilder().getBuilder();
      }
      /**
       * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
        if (journalIdBuilder_ != null) {
          return journalIdBuilder_.getMessageOrBuilder();
        } else {
          return journalId_;
        }
      }
      /**
       * required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>
          getJournalIdFieldBuilder() {
        if (journalIdBuilder_ == null) {
          journalIdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
                  journalId_,
                  getParentForChildren(),
                  isClean());
          journalId_ = null;
        }
        return journalIdBuilder_;
      }

      // required uint64 epoch = 2;
      private long epoch_ ;
      /**
       * required uint64 epoch = 2;
       */
      public boolean hasEpoch() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * required uint64 epoch = 2;
       */
      public long getEpoch() {
        return epoch_;
      }
      /**
       * required uint64 epoch = 2;
       */
      public Builder setEpoch(long value) {
        bitField0_ |= 0x00000002;
        epoch_ = value;
        onChanged();
        return this;
      }
      /**
       * required uint64 epoch = 2;
       */
      public Builder clearEpoch() {
        bitField0_ = (bitField0_ & ~0x00000002);
        epoch_ = 0L;
        onChanged();
        return this;
      }

      // required uint64 ipcSerialNumber = 3;
      private long ipcSerialNumber_ ;
      /**
       * required uint64 ipcSerialNumber = 3;
       */
      public boolean hasIpcSerialNumber() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * required uint64 ipcSerialNumber = 3;
       */
      public long getIpcSerialNumber() {
        return ipcSerialNumber_;
      }
      /**
       * required uint64 ipcSerialNumber = 3;
       */
      public Builder setIpcSerialNumber(long value) {
        bitField0_ |= 0x00000004;
        ipcSerialNumber_ = value;
        onChanged();
        return this;
      }
      /**
       * required uint64 ipcSerialNumber = 3;
       */
      public Builder clearIpcSerialNumber() {
        bitField0_ = (bitField0_ & ~0x00000004);
        ipcSerialNumber_ = 0L;
        onChanged();
        return this;
      }

      // optional uint64 committedTxId = 4;
      private long committedTxId_ ;
      /**
       * optional uint64 committedTxId = 4;
       *
       *
       * Whenever a writer makes a request, it informs
       * the node of the latest committed txid. This may
       * be higher than the transaction data included in the
       * request itself, eg in the case that the node has
       * fallen behind.
       * 
       */
      public boolean hasCommittedTxId() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      /**
       * optional uint64 committedTxId = 4;
       *
       *
       * Whenever a writer makes a request, it informs
       * the node of the latest committed txid. This may
       * be higher than the transaction data included in the
       * request itself, eg in the case that the node has
       * fallen behind.
       * 
       */
      public long getCommittedTxId() {
        return committedTxId_;
      }
      /**
       * optional uint64 committedTxId = 4;
       *
       *
       * Whenever a writer makes a request, it informs
       * the node of the latest committed txid. This may
       * be higher than the transaction data included in the
       * request itself, eg in the case that the node has
       * fallen behind.
       * 
       */
      public Builder setCommittedTxId(long value) {
        bitField0_ |= 0x00000008;
        committedTxId_ = value;
        onChanged();
        return this;
      }
      /**
       * optional uint64 committedTxId = 4;
       *
       *
       * Whenever a writer makes a request, it informs
       * the node of the latest committed txid. This may
       * be higher than the transaction data included in the
       * request itself, eg in the case that the node has
       * fallen behind.
       * 
*/ public Builder clearCommittedTxId() { bitField0_ = (bitField0_ & ~0x00000008); committedTxId_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.RequestInfoProto) } static { defaultInstance = new RequestInfoProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.RequestInfoProto) } public interface SegmentStateProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint64 startTxId = 1; /** * required uint64 startTxId = 1; */ boolean hasStartTxId(); /** * required uint64 startTxId = 1; */ long getStartTxId(); // required uint64 endTxId = 2; /** * required uint64 endTxId = 2; */ boolean hasEndTxId(); /** * required uint64 endTxId = 2; */ long getEndTxId(); // required bool isInProgress = 3; /** * required bool isInProgress = 3; */ boolean hasIsInProgress(); /** * required bool isInProgress = 3; */ boolean getIsInProgress(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.SegmentStateProto} */ public static final class SegmentStateProto extends com.google.protobuf.GeneratedMessage implements SegmentStateProtoOrBuilder { // Use SegmentStateProto.newBuilder() to construct. private SegmentStateProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SegmentStateProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SegmentStateProto defaultInstance; public static SegmentStateProto getDefaultInstance() { return defaultInstance; } public SegmentStateProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SegmentStateProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; startTxId_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; endTxId_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; isInProgress_ = input.readBool(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_SegmentStateProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_SegmentStateProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public SegmentStateProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new SegmentStateProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint64 startTxId = 1; public static final int STARTTXID_FIELD_NUMBER = 1; private long startTxId_; /** * required uint64 startTxId = 1; */ public boolean hasStartTxId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 startTxId = 1; */ public long getStartTxId() { return startTxId_; } // required uint64 endTxId = 2; public static final int ENDTXID_FIELD_NUMBER = 2; private long endTxId_; /** * required uint64 endTxId = 2; */ public boolean hasEndTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 endTxId = 2; */ public long getEndTxId() { return endTxId_; } // required bool isInProgress = 3; public static final int ISINPROGRESS_FIELD_NUMBER = 3; private boolean isInProgress_; /** * required bool isInProgress = 3; */ public boolean hasIsInProgress() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bool isInProgress = 3; */ public boolean getIsInProgress() { return isInProgress_; } private void initFields() { startTxId_ = 0L; endTxId_ = 0L; isInProgress_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasStartTxId()) { memoizedIsInitialized = 0; return false; } if (!hasEndTxId()) { memoizedIsInitialized = 0; return false; } if (!hasIsInProgress()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, startTxId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, endTxId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBool(3, isInProgress_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, startTxId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, endTxId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(3, isInProgress_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) obj; boolean result = true; result = result && (hasStartTxId() == other.hasStartTxId()); if (hasStartTxId()) { result = result && (getStartTxId() == other.getStartTxId()); } result = result && (hasEndTxId() == other.hasEndTxId()); if (hasEndTxId()) { result = result && (getEndTxId() == other.getEndTxId()); } result = result && (hasIsInProgress() == other.hasIsInProgress()); if (hasIsInProgress()) { result = result && (getIsInProgress() == other.getIsInProgress()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasStartTxId()) { hash = (37 * hash) + STARTTXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getStartTxId()); } if (hasEndTxId()) { hash = (37 * hash) + ENDTXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getEndTxId()); } if (hasIsInProgress()) { hash = (37 * hash) + ISINPROGRESS_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getIsInProgress()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.SegmentStateProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_SegmentStateProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_SegmentStateProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); startTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); endTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); isInProgress_ = false; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_SegmentStateProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.startTxId_ = startTxId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.endTxId_ = endTxId_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.isInProgress_ = isInProgress_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) return this; if (other.hasStartTxId()) { setStartTxId(other.getStartTxId()); } if (other.hasEndTxId()) { setEndTxId(other.getEndTxId()); } if (other.hasIsInProgress()) { setIsInProgress(other.getIsInProgress()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasStartTxId()) { return false; } if (!hasEndTxId()) { return false; } if (!hasIsInProgress()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 startTxId = 1; private long startTxId_ ; /** * required uint64 startTxId = 1; */ public boolean hasStartTxId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 startTxId = 1; */ public long getStartTxId() { return startTxId_; } /** * required uint64 startTxId = 1; */ public Builder setStartTxId(long value) { bitField0_ |= 0x00000001; startTxId_ = value; onChanged(); return this; } /** * required uint64 startTxId = 1; */ public Builder clearStartTxId() { bitField0_ = (bitField0_ & ~0x00000001); startTxId_ = 0L; onChanged(); return this; } // required uint64 endTxId = 2; private long endTxId_ ; /** * required uint64 endTxId = 2; */ public boolean hasEndTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 endTxId = 2; */ public long getEndTxId() { return endTxId_; } /** * required uint64 endTxId = 2; */ public Builder setEndTxId(long value) { bitField0_ |= 0x00000002; endTxId_ = value; onChanged(); return this; } /** * required uint64 endTxId = 2; */ public Builder clearEndTxId() { bitField0_ = (bitField0_ & ~0x00000002); endTxId_ = 0L; onChanged(); 
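        // (Note on the accessor pattern above and below: each field owns one
        // bit in bitField0_; set* turns the bit on, clear* masks it off and
        // restores the proto2 default value, and onChanged() marks this
        // builder dirty so any parent builder is notified.)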
return this; } // required bool isInProgress = 3; private boolean isInProgress_ ; /** * required bool isInProgress = 3; */ public boolean hasIsInProgress() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bool isInProgress = 3; */ public boolean getIsInProgress() { return isInProgress_; } /** * required bool isInProgress = 3; */ public Builder setIsInProgress(boolean value) { bitField0_ |= 0x00000004; isInProgress_ = value; onChanged(); return this; } /** * required bool isInProgress = 3; */ public Builder clearIsInProgress() { bitField0_ = (bitField0_ & ~0x00000004); isInProgress_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.SegmentStateProto) } static { defaultInstance = new SegmentStateProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.SegmentStateProto) } public interface PersistedRecoveryPaxosDataOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ boolean hasSegmentState(); /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState(); /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder(); // required uint64 acceptedInEpoch = 2; /** * required uint64 acceptedInEpoch = 2; */ boolean hasAcceptedInEpoch(); /** * required uint64 acceptedInEpoch = 2; */ long getAcceptedInEpoch(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.PersistedRecoveryPaxosData} * *
   * <pre>
   **
   * The storage format used on local disk for previously
   * accepted decisions.
   * </pre>
*/ public static final class PersistedRecoveryPaxosData extends com.google.protobuf.GeneratedMessage implements PersistedRecoveryPaxosDataOrBuilder { // Use PersistedRecoveryPaxosData.newBuilder() to construct. private PersistedRecoveryPaxosData(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private PersistedRecoveryPaxosData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final PersistedRecoveryPaxosData defaultInstance; public static PersistedRecoveryPaxosData getDefaultInstance() { return defaultInstance; } public PersistedRecoveryPaxosData getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private PersistedRecoveryPaxosData( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = segmentState_.toBuilder(); } segmentState_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(segmentState_); segmentState_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; acceptedInEpoch_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public PersistedRecoveryPaxosData parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new PersistedRecoveryPaxosData(input, extensionRegistry); } }; @java.lang.Override public 
com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; public static final int SEGMENTSTATE_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_; /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public boolean hasSegmentState() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() { return segmentState_; } /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() { return segmentState_; } // required uint64 acceptedInEpoch = 2; public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2; private long acceptedInEpoch_; /** * required uint64 acceptedInEpoch = 2; */ public boolean hasAcceptedInEpoch() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 acceptedInEpoch = 2; */ public long getAcceptedInEpoch() { return acceptedInEpoch_; } private void initFields() { segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); acceptedInEpoch_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSegmentState()) { memoizedIsInitialized = 0; return false; } if (!hasAcceptedInEpoch()) { memoizedIsInitialized = 0; return false; } if (!getSegmentState().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, segmentState_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, acceptedInEpoch_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, segmentState_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, acceptedInEpoch_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) obj; boolean result = true; result = result && (hasSegmentState() == other.hasSegmentState()); if (hasSegmentState()) { result = result && getSegmentState() 
.equals(other.getSegmentState()); } result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch()); if (hasAcceptedInEpoch()) { result = result && (getAcceptedInEpoch() == other.getAcceptedInEpoch()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSegmentState()) { hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER; hash = (53 * hash) + getSegmentState().hashCode(); } if (hasAcceptedInEpoch()) { hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER; hash = (53 * hash) + hashLong(getAcceptedInEpoch()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return 
Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.PersistedRecoveryPaxosData} * *
     * <pre>
     **
     * The storage format used on local disk for previously
     * accepted decisions.
     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosDataOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getSegmentStateFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (segmentStateBuilder_ == null) { segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); } else { segmentStateBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); acceptedInEpoch_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (segmentStateBuilder_ == null) { result.segmentState_ = segmentState_; } else { result.segmentState_ = segmentStateBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.acceptedInEpoch_ = acceptedInEpoch_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance()) return this; if (other.hasSegmentState()) { mergeSegmentState(other.getSegmentState()); } if (other.hasAcceptedInEpoch()) { setAcceptedInEpoch(other.getAcceptedInEpoch()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSegmentState()) { return false; } if (!hasAcceptedInEpoch()) { return false; } if (!getSegmentState().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_; /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public boolean hasSegmentState() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() { if (segmentStateBuilder_ == null) { return segmentState_; } else { return segmentStateBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) { if (segmentStateBuilder_ == null) { if (value == null) { throw new NullPointerException(); } segmentState_ = value; onChanged(); } else { segmentStateBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public Builder setSegmentState( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) { if (segmentStateBuilder_ == null) { segmentState_ = builderForValue.build(); onChanged(); } else { segmentStateBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required 
.hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) { if (segmentStateBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) { segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial(); } else { segmentState_ = value; } onChanged(); } else { segmentStateBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public Builder clearSegmentState() { if (segmentStateBuilder_ == null) { segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); onChanged(); } else { segmentStateBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() { bitField0_ |= 0x00000001; onChanged(); return getSegmentStateFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() { if (segmentStateBuilder_ != null) { return segmentStateBuilder_.getMessageOrBuilder(); } else { return segmentState_; } } /** * required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> getSegmentStateFieldBuilder() { if (segmentStateBuilder_ == null) { segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>( segmentState_, getParentForChildren(), isClean()); segmentState_ = null; } return segmentStateBuilder_; } // required uint64 acceptedInEpoch = 2; private long acceptedInEpoch_ ; /** * required uint64 acceptedInEpoch = 2; */ public boolean hasAcceptedInEpoch() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 acceptedInEpoch = 2; */ public long getAcceptedInEpoch() { return acceptedInEpoch_; } /** * required uint64 acceptedInEpoch = 2; */ public Builder setAcceptedInEpoch(long value) { bitField0_ |= 0x00000002; acceptedInEpoch_ = value; onChanged(); return this; } /** * required uint64 acceptedInEpoch = 2; */ public Builder clearAcceptedInEpoch() { bitField0_ = (bitField0_ & ~0x00000002); acceptedInEpoch_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.PersistedRecoveryPaxosData) } static { defaultInstance = new PersistedRecoveryPaxosData(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.PersistedRecoveryPaxosData) } public interface 
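// Illustrative sketch, not part of the generated file: persisting an accepted
// recovery decision with the PersistedRecoveryPaxosData message defined above.
// Both fields are required, and segmentState must itself be initialized.
// `seg` and `out` are assumed here (a built SegmentStateProto and an
// OutputStream, e.g. a FileOutputStream).
//
//   PersistedRecoveryPaxosData paxosData = PersistedRecoveryPaxosData.newBuilder()
//       .setSegmentState(seg)        // a fully-built SegmentStateProto
//       .setAcceptedInEpoch(5L)      // epoch in which the value was accepted
//       .build();
//   paxosData.writeDelimitedTo(out); // read back via parseDelimitedFrom above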
JournalRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ boolean hasReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder(); // required uint64 firstTxnId = 2; /** * required uint64 firstTxnId = 2; */ boolean hasFirstTxnId(); /** * required uint64 firstTxnId = 2; */ long getFirstTxnId(); // required uint32 numTxns = 3; /** * required uint32 numTxns = 3; */ boolean hasNumTxns(); /** * required uint32 numTxns = 3; */ int getNumTxns(); // required bytes records = 4; /** * required bytes records = 4; */ boolean hasRecords(); /** * required bytes records = 4; */ com.google.protobuf.ByteString getRecords(); // required uint64 segmentTxnId = 5; /** * required uint64 segmentTxnId = 5; */ boolean hasSegmentTxnId(); /** * required uint64 segmentTxnId = 5; */ long getSegmentTxnId(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.JournalRequestProto} */ public static final class JournalRequestProto extends com.google.protobuf.GeneratedMessage implements JournalRequestProtoOrBuilder { // Use JournalRequestProto.newBuilder() to construct. private JournalRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private JournalRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final JournalRequestProto defaultInstance; public static JournalRequestProto getDefaultInstance() { return defaultInstance; } public JournalRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private JournalRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = reqInfo_.toBuilder(); } reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(reqInfo_); reqInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; firstTxnId_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; numTxns_ = input.readUInt32(); break; } case 34: { bitField0_ |= 0x00000008; records_ = input.readBytes(); break; } case 40: { bitField0_ |= 0x00000010; segmentTxnId_ = input.readUInt64(); break; } } } } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public JournalRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new JournalRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; public static final int REQINFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { return reqInfo_; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { return reqInfo_; } // required uint64 firstTxnId = 2; public static final int FIRSTTXNID_FIELD_NUMBER = 2; private long firstTxnId_; /** * required uint64 firstTxnId = 2; */ public boolean hasFirstTxnId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 firstTxnId = 2; */ public long getFirstTxnId() { return firstTxnId_; } // required uint32 numTxns = 3; public static final int NUMTXNS_FIELD_NUMBER = 3; private int numTxns_; /** * required uint32 numTxns = 3; */ public boolean hasNumTxns() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint32 numTxns = 3; */ public int getNumTxns() { return numTxns_; } // required bytes records = 4; public static final int RECORDS_FIELD_NUMBER = 4; private com.google.protobuf.ByteString records_; /** * required bytes records = 4; */ public boolean hasRecords() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bytes records = 4; */ public com.google.protobuf.ByteString getRecords() { return records_; } // required uint64 segmentTxnId = 5; public static final int SEGMENTTXNID_FIELD_NUMBER = 5; private long segmentTxnId_; /** * required uint64 segmentTxnId = 5; */ public boolean hasSegmentTxnId() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint64 segmentTxnId = 5; 
*/ public long getSegmentTxnId() { return segmentTxnId_; } private void initFields() { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); firstTxnId_ = 0L; numTxns_ = 0; records_ = com.google.protobuf.ByteString.EMPTY; segmentTxnId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasReqInfo()) { memoizedIsInitialized = 0; return false; } if (!hasFirstTxnId()) { memoizedIsInitialized = 0; return false; } if (!hasNumTxns()) { memoizedIsInitialized = 0; return false; } if (!hasRecords()) { memoizedIsInitialized = 0; return false; } if (!hasSegmentTxnId()) { memoizedIsInitialized = 0; return false; } if (!getReqInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, reqInfo_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, firstTxnId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt32(3, numTxns_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBytes(4, records_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeUInt64(5, segmentTxnId_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, reqInfo_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, firstTxnId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(3, numTxns_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(4, records_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(5, segmentTxnId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) obj; boolean result = true; result = result && (hasReqInfo() == other.hasReqInfo()); if (hasReqInfo()) { result = result && getReqInfo() .equals(other.getReqInfo()); } result = result && (hasFirstTxnId() == other.hasFirstTxnId()); if (hasFirstTxnId()) { result = result && (getFirstTxnId() == other.getFirstTxnId()); } result = result && (hasNumTxns() == other.hasNumTxns()); if (hasNumTxns()) { result = result && (getNumTxns() == other.getNumTxns()); } result = result && (hasRecords() == other.hasRecords()); if (hasRecords()) { result = 
result && getRecords() .equals(other.getRecords()); } result = result && (hasSegmentTxnId() == other.hasSegmentTxnId()); if (hasSegmentTxnId()) { result = result && (getSegmentTxnId() == other.getSegmentTxnId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasReqInfo()) { hash = (37 * hash) + REQINFO_FIELD_NUMBER; hash = (53 * hash) + getReqInfo().hashCode(); } if (hasFirstTxnId()) { hash = (37 * hash) + FIRSTTXNID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFirstTxnId()); } if (hasNumTxns()) { hash = (37 * hash) + NUMTXNS_FIELD_NUMBER; hash = (53 * hash) + getNumTxns(); } if (hasRecords()) { hash = (37 * hash) + RECORDS_FIELD_NUMBER; hash = (53 * hash) + getRecords().hashCode(); } if (hasSegmentTxnId()) { hash = (37 * hash) + SEGMENTTXNID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getSegmentTxnId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom( 
com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.JournalRequestProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getReqInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); firstTxnId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); numTxns_ = 0; bitField0_ = (bitField0_ & ~0x00000004); records_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000008); segmentTxnId_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return 
result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (reqInfoBuilder_ == null) { result.reqInfo_ = reqInfo_; } else { result.reqInfo_ = reqInfoBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.firstTxnId_ = firstTxnId_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.numTxns_ = numTxns_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.records_ = records_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.segmentTxnId_ = segmentTxnId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance()) return this; if (other.hasReqInfo()) { mergeReqInfo(other.getReqInfo()); } if (other.hasFirstTxnId()) { setFirstTxnId(other.getFirstTxnId()); } if (other.hasNumTxns()) { setNumTxns(other.getNumTxns()); } if (other.hasRecords()) { setRecords(other.getRecords()); } if (other.hasSegmentTxnId()) { setSegmentTxnId(other.getSegmentTxnId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasReqInfo()) { return false; } if (!hasFirstTxnId()) { return false; } if (!hasNumTxns()) { return false; } if (!hasRecords()) { return false; } if (!hasSegmentTxnId()) { return false; } if (!getReqInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, 
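// Illustrative sketch, not part of the generated file: a journal() request
// carrying a batch of serialized edit-log records. `reqInfo` is assumed to be
// a RequestInfoProto built elsewhere (that message is defined earlier in this
// file), and `editLogBytes` an assumed ByteString of encoded transactions.
//
//   JournalRequestProto req = JournalRequestProto.newBuilder()
//       .setReqInfo(reqInfo)
//       .setFirstTxnId(101L)       // txid of the first record in the batch
//       .setNumTxns(2)             // number of records in the batch
//       .setRecords(editLogBytes)
//       .setSegmentTxnId(101L)     // txid that opened the current segment
//       .build();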
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { if (reqInfoBuilder_ == null) { return reqInfo_; } else { return reqInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } reqInfo_ = value; onChanged(); } else { reqInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) { if (reqInfoBuilder_ == null) { reqInfo_ = builderForValue.build(); onChanged(); } else { reqInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial(); } else { reqInfo_ = value; } onChanged(); } else { reqInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder clearReqInfo() { if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); onChanged(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getReqInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { if (reqInfoBuilder_ != null) { return reqInfoBuilder_.getMessageOrBuilder(); } else { return reqInfo_; } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> getReqInfoFieldBuilder() { if (reqInfoBuilder_ == null) { reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>( reqInfo_, getParentForChildren(), isClean()); reqInfo_ = null; } return reqInfoBuilder_; } // required uint64 firstTxnId = 2; private long firstTxnId_ ; /** * required uint64 firstTxnId = 2; */ public boolean hasFirstTxnId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 firstTxnId = 2; */ public long getFirstTxnId() { return firstTxnId_; } /** * required uint64 firstTxnId = 2; */ public Builder setFirstTxnId(long value) { bitField0_ |= 0x00000002; firstTxnId_ = value; onChanged(); return this; } /** * required uint64 firstTxnId = 2; */ public Builder clearFirstTxnId() { bitField0_ = (bitField0_ & ~0x00000002); firstTxnId_ = 0L; onChanged(); return this; } // required uint32 numTxns = 3; private int numTxns_ ; /** * required uint32 numTxns = 3; */ public boolean hasNumTxns() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint32 numTxns = 3; */ public int getNumTxns() { return numTxns_; } /** * required uint32 numTxns = 3; */ public Builder setNumTxns(int value) { bitField0_ |= 0x00000004; numTxns_ = value; onChanged(); return this; } /** * required uint32 numTxns = 3; */ public Builder clearNumTxns() { bitField0_ = (bitField0_ & ~0x00000004); numTxns_ = 0; onChanged(); return this; } // required bytes records = 4; private com.google.protobuf.ByteString records_ = com.google.protobuf.ByteString.EMPTY; /** * required bytes records = 4; */ public boolean hasRecords() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bytes records = 4; */ public com.google.protobuf.ByteString getRecords() { return records_; } /** * required bytes records = 4; */ public Builder setRecords(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; records_ = value; onChanged(); return this; } /** * required bytes records = 4; */ public Builder clearRecords() { bitField0_ = (bitField0_ & ~0x00000008); records_ = getDefaultInstance().getRecords(); onChanged(); return this; } // required uint64 segmentTxnId = 5; private long segmentTxnId_ ; /** * required uint64 segmentTxnId = 5; */ public boolean hasSegmentTxnId() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint64 segmentTxnId = 5; */ public long getSegmentTxnId() { return segmentTxnId_; } /** * required uint64 segmentTxnId = 5; */ public Builder setSegmentTxnId(long value) { bitField0_ |= 0x00000010; segmentTxnId_ = value; onChanged(); return this; } /** * required uint64 segmentTxnId = 5; */ public Builder clearSegmentTxnId() { bitField0_ = (bitField0_ & ~0x00000010); segmentTxnId_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.JournalRequestProto) } static { defaultInstance = new JournalRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.JournalRequestProto) } public interface JournalResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.qjournal.JournalResponseProto} */ public static final class JournalResponseProto extends com.google.protobuf.GeneratedMessage implements JournalResponseProtoOrBuilder { // Use JournalResponseProto.newBuilder() to construct. 
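  // JournalResponseProto declares no fields: it is an empty acknowledgement
  // message, so callers typically only check that the RPC completed, and
  // getDefaultInstance() is the cheapest way to obtain an instance.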
private JournalResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private JournalResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final JournalResponseProto defaultInstance; public static JournalResponseProto getDefaultInstance() { return defaultInstance; } public JournalResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private JournalResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public JournalResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new JournalResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } 
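// Editorial sketch, not part of the generated file: JournalResponseProto
// declares no fields and acts as a bare acknowledgement, so a round trip is
// just serialize-then-parse (`bytes` is an assumed byte[] from the wire):
//
//   byte[] wire = JournalResponseProto.getDefaultInstance().toByteArray();
//   JournalResponseProto ack = JournalResponseProto.parseFrom(bytes);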
@java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static 
Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.JournalResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other) { if (other == 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.JournalResponseProto) } static { defaultInstance = new JournalResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.JournalResponseProto) } public interface HeartbeatRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ boolean hasReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.HeartbeatRequestProto} */ public static final class HeartbeatRequestProto extends com.google.protobuf.GeneratedMessage implements HeartbeatRequestProtoOrBuilder { // Use HeartbeatRequestProto.newBuilder() to construct. 
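// Editorial sketch, not part of the generated file: a heartbeat request
// carries only the required reqInfo message defined below (`reqInfo` is an
// assumed RequestInfoProto instance):
//
//   HeartbeatRequestProto hb = HeartbeatRequestProto.newBuilder()
//       .setReqInfo(reqInfo)
//       .build();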
private HeartbeatRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private HeartbeatRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final HeartbeatRequestProto defaultInstance; public static HeartbeatRequestProto getDefaultInstance() { return defaultInstance; } public HeartbeatRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private HeartbeatRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = reqInfo_.toBuilder(); } reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(reqInfo_); reqInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public HeartbeatRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new HeartbeatRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; public static final int REQINFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public 
boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { return reqInfo_; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { return reqInfo_; } private void initFields() { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasReqInfo()) { memoizedIsInitialized = 0; return false; } if (!getReqInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, reqInfo_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, reqInfo_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) obj; boolean result = true; result = result && (hasReqInfo() == other.hasReqInfo()); if (hasReqInfo()) { result = result && getReqInfo() .equals(other.getReqInfo()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasReqInfo()) { hash = (37 * hash) + REQINFO_FIELD_NUMBER; hash = (53 * hash) + getReqInfo().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(byte[] data) 
throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.HeartbeatRequestProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.newBuilder() private Builder() { 
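// Editorial note, not part of the generated file: the parseFrom overloads
// above consume one message spanning the whole input, while
// parseDelimitedFrom first reads a varint length prefix, so several
// messages can share one stream:
//
//   hb.writeDelimitedTo(out);                               // length-prefixed
//   HeartbeatRequestProto next =
//       HeartbeatRequestProto.parseDelimitedFrom(in);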
maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getReqInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (reqInfoBuilder_ == null) { result.reqInfo_ = reqInfo_; } else { result.reqInfo_ = reqInfoBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance()) return this; if (other.hasReqInfo()) { mergeReqInfo(other.getReqInfo()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasReqInfo()) { return false; } if (!getReqInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { 
mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { if (reqInfoBuilder_ == null) { return reqInfo_; } else { return reqInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } reqInfo_ = value; onChanged(); } else { reqInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) { if (reqInfoBuilder_ == null) { reqInfo_ = builderForValue.build(); onChanged(); } else { reqInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial(); } else { reqInfo_ = value; } onChanged(); } else { reqInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder clearReqInfo() { if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); onChanged(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getReqInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { if (reqInfoBuilder_ != null) { return reqInfoBuilder_.getMessageOrBuilder(); } else { return reqInfo_; } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ private 
com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> getReqInfoFieldBuilder() { if (reqInfoBuilder_ == null) { reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>( reqInfo_, getParentForChildren(), isClean()); reqInfo_ = null; } return reqInfoBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.HeartbeatRequestProto) } static { defaultInstance = new HeartbeatRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.HeartbeatRequestProto) } public interface HeartbeatResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.qjournal.HeartbeatResponseProto} * *
<pre> * void response * </pre>
*/ public static final class HeartbeatResponseProto extends com.google.protobuf.GeneratedMessage implements HeartbeatResponseProtoOrBuilder { // Use HeartbeatResponseProto.newBuilder() to construct. private HeartbeatResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private HeartbeatResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final HeartbeatResponseProto defaultInstance; public static HeartbeatResponseProto getDefaultInstance() { return defaultInstance; } public HeartbeatResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private HeartbeatResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public HeartbeatResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new HeartbeatResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); 
memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom( com.google.protobuf.CodedInputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.HeartbeatResponseProto} * *
<pre> * void response * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { 
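// Editorial note, not part of the generated file: this parse-then-merge
// shape recurs in every Builder in this file. parsePartialFrom may stash a
// partially read message inside the thrown InvalidProtocolBufferException;
// the finally block merges whatever was read before the exception
// propagates, so callers can still inspect the partial state.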
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.HeartbeatResponseProto) } static { defaultInstance = new HeartbeatResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.HeartbeatResponseProto) } public interface StartLogSegmentRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ boolean hasReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder(); // required uint64 txid = 2; /** * required uint64 txid = 2; * *
<pre> * Transaction ID * </pre>
*/ boolean hasTxid(); /** * required uint64 txid = 2; * *
<pre> * Transaction ID * </pre>
*/ long getTxid(); // optional sint32 layoutVersion = 3; /** * optional sint32 layoutVersion = 3; * *
<pre> * the LayoutVersion in the client * </pre>
*/ boolean hasLayoutVersion(); /** * optional sint32 layoutVersion = 3; * *
<pre> * the LayoutVersion in the client * </pre>
*/ int getLayoutVersion(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.StartLogSegmentRequestProto} * *
<pre> ** * startLogSegment() * </pre>
*/ public static final class StartLogSegmentRequestProto extends com.google.protobuf.GeneratedMessage implements StartLogSegmentRequestProtoOrBuilder { // Use StartLogSegmentRequestProto.newBuilder() to construct. private StartLogSegmentRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private StartLogSegmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final StartLogSegmentRequestProto defaultInstance; public static StartLogSegmentRequestProto getDefaultInstance() { return defaultInstance; } public StartLogSegmentRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StartLogSegmentRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = reqInfo_.toBuilder(); } reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(reqInfo_); reqInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; txid_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; layoutVersion_ = input.readSInt32(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public StartLogSegmentRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new 
StartLogSegmentRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; public static final int REQINFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { return reqInfo_; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { return reqInfo_; } // required uint64 txid = 2; public static final int TXID_FIELD_NUMBER = 2; private long txid_; /** * required uint64 txid = 2; * *
<pre> * Transaction ID * </pre>
*/ public boolean hasTxid() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 txid = 2; * *
<pre> * Transaction ID * </pre>
*/ public long getTxid() { return txid_; } // optional sint32 layoutVersion = 3; public static final int LAYOUTVERSION_FIELD_NUMBER = 3; private int layoutVersion_; /** * optional sint32 layoutVersion = 3; * *
<pre> * the LayoutVersion in the client * </pre>
*/ public boolean hasLayoutVersion() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional sint32 layoutVersion = 3; * *
<pre> * the LayoutVersion in the client * </pre>
*/ public int getLayoutVersion() { return layoutVersion_; } private void initFields() { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); txid_ = 0L; layoutVersion_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasReqInfo()) { memoizedIsInitialized = 0; return false; } if (!hasTxid()) { memoizedIsInitialized = 0; return false; } if (!getReqInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, reqInfo_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, txid_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeSInt32(3, layoutVersion_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, reqInfo_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, txid_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeSInt32Size(3, layoutVersion_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) obj; boolean result = true; result = result && (hasReqInfo() == other.hasReqInfo()); if (hasReqInfo()) { result = result && getReqInfo() .equals(other.getReqInfo()); } result = result && (hasTxid() == other.hasTxid()); if (hasTxid()) { result = result && (getTxid() == other.getTxid()); } result = result && (hasLayoutVersion() == other.hasLayoutVersion()); if (hasLayoutVersion()) { result = result && (getLayoutVersion() == other.getLayoutVersion()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasReqInfo()) { hash = (37 * hash) + REQINFO_FIELD_NUMBER; hash = (53 * hash) + getReqInfo().hashCode(); } if (hasTxid()) { hash = (37 * hash) + TXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getTxid()); } if (hasLayoutVersion()) { hash = (37 * hash) + LAYOUTVERSION_FIELD_NUMBER; hash = (53 * hash) + getLayoutVersion(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.StartLogSegmentRequestProto} * *
     * <pre>
     **
     * startLogSegment()
     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getReqInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); txid_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); layoutVersion_ = 0; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (reqInfoBuilder_ == null) { result.reqInfo_ = reqInfo_; } else { result.reqInfo_ = reqInfoBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.txid_ = txid_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.layoutVersion_ = layoutVersion_; result.bitField0_ = to_bitField0_; 
onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance()) return this; if (other.hasReqInfo()) { mergeReqInfo(other.getReqInfo()); } if (other.hasTxid()) { setTxid(other.getTxid()); } if (other.hasLayoutVersion()) { setLayoutVersion(other.getLayoutVersion()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasReqInfo()) { return false; } if (!hasTxid()) { return false; } if (!getReqInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { if (reqInfoBuilder_ == null) { return reqInfo_; } else { return reqInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } reqInfo_ = value; onChanged(); } else { reqInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) { if (reqInfoBuilder_ == null) { reqInfo_ = builderForValue.build(); onChanged(); } else { reqInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * 
required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial(); } else { reqInfo_ = value; } onChanged(); } else { reqInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder clearReqInfo() { if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); onChanged(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getReqInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { if (reqInfoBuilder_ != null) { return reqInfoBuilder_.getMessageOrBuilder(); } else { return reqInfo_; } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> getReqInfoFieldBuilder() { if (reqInfoBuilder_ == null) { reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>( reqInfo_, getParentForChildren(), isClean()); reqInfo_ = null; } return reqInfoBuilder_; } // required uint64 txid = 2; private long txid_ ; /** * required uint64 txid = 2; * *
       * <pre>
       * Transaction ID
       * </pre>
*/ public boolean hasTxid() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 txid = 2; * *
       * <pre>
       * Transaction ID
       * </pre>
*/ public long getTxid() { return txid_; } /** * required uint64 txid = 2; * *
       * <pre>
       * Transaction ID
       * </pre>
*/ public Builder setTxid(long value) { bitField0_ |= 0x00000002; txid_ = value; onChanged(); return this; } /** * required uint64 txid = 2; * *
       * <pre>
       * Transaction ID
       * </pre>
*/ public Builder clearTxid() { bitField0_ = (bitField0_ & ~0x00000002); txid_ = 0L; onChanged(); return this; } // optional sint32 layoutVersion = 3; private int layoutVersion_ ; /** * optional sint32 layoutVersion = 3; * *
       * <pre>
       * the LayoutVersion in the client
       * </pre>
*/ public boolean hasLayoutVersion() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional sint32 layoutVersion = 3; * *
       * <pre>
       * the LayoutVersion in the client
       * </pre>
*/ public int getLayoutVersion() { return layoutVersion_; } /** * optional sint32 layoutVersion = 3; * *
       * <pre>
       * the LayoutVersion in the client
       * </pre>
*/ public Builder setLayoutVersion(int value) { bitField0_ |= 0x00000004; layoutVersion_ = value; onChanged(); return this; } /** * optional sint32 layoutVersion = 3; * *
       * <pre>
       * the LayoutVersion in the client
       * </pre>
*/ public Builder clearLayoutVersion() { bitField0_ = (bitField0_ & ~0x00000004); layoutVersion_ = 0; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.StartLogSegmentRequestProto) } static { defaultInstance = new StartLogSegmentRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.StartLogSegmentRequestProto) } public interface StartLogSegmentResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.qjournal.StartLogSegmentResponseProto} */ public static final class StartLogSegmentResponseProto extends com.google.protobuf.GeneratedMessage implements StartLogSegmentResponseProtoOrBuilder { // Use StartLogSegmentResponseProto.newBuilder() to construct. private StartLogSegmentResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private StartLogSegmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final StartLogSegmentResponseProto defaultInstance; public static StartLogSegmentResponseProto getDefaultInstance() { return defaultInstance; } public StartLogSegmentResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StartLogSegmentResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public StartLogSegmentResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new StartLogSegmentResponseProto(input, extensionRegistry); } }; 
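  // ---------------------------------------------------------------------
  // Usage sketch (editorial note, not part of the generated file):
  // constructing and serializing a StartLogSegmentRequestProto with the
  // Builder shown above. The reqInfo value is assumed to be built by the
  // surrounding journal client code; the txid and layoutVersion literals
  // are illustrative placeholders only.
  //
  //   QJournalProtocolProtos.RequestInfoProto reqInfo = ...; // built elsewhere
  //   QJournalProtocolProtos.StartLogSegmentRequestProto req =
  //       QJournalProtocolProtos.StartLogSegmentRequestProto.newBuilder()
  //           .setReqInfo(reqInfo)    // required message field (tag 1)
  //           .setTxid(42L)           // required uint64: first txid of the segment
  //           .setLayoutVersion(-63)  // optional sint32: client layout version
  //           .build();               // throws if a required field is unset
  //   byte[] wire = req.toByteArray(); // standard protobuf serialization
  // ---------------------------------------------------------------------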
@java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input) throws 
java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.StartLogSegmentResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(); } 
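  // Usage sketch (editorial note, not part of the generated file): the
  // response message carries no fields, so a round trip through the
  // delimited-stream helpers above only exercises the length-prefixed
  // framing. The stream variables out and in are illustrative names.
  //
  //   QJournalProtocolProtos.StartLogSegmentResponseProto resp =
  //       QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
  //   resp.writeDelimitedTo(out);   // length-prefixed write to an OutputStream
  //   QJournalProtocolProtos.StartLogSegmentResponseProto parsed =
  //       QJournalProtocolProtos.StartLogSegmentResponseProto.parseDelimitedFrom(in);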
public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.StartLogSegmentResponseProto) } static { defaultInstance = new StartLogSegmentResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.StartLogSegmentResponseProto) } public interface FinalizeLogSegmentRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ boolean hasReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder(); // required uint64 startTxId = 2; /** * required uint64 startTxId = 2; */ boolean hasStartTxId(); /** * required uint64 startTxId = 2; */ long getStartTxId(); // required uint64 endTxId = 3; /** * required uint64 endTxId = 3; */ boolean hasEndTxId(); /** * required uint64 endTxId = 3; */ long getEndTxId(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.FinalizeLogSegmentRequestProto} * *
   * <pre>
   **
   * finalizeLogSegment()
   * </pre>
*/ public static final class FinalizeLogSegmentRequestProto extends com.google.protobuf.GeneratedMessage implements FinalizeLogSegmentRequestProtoOrBuilder { // Use FinalizeLogSegmentRequestProto.newBuilder() to construct. private FinalizeLogSegmentRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private FinalizeLogSegmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final FinalizeLogSegmentRequestProto defaultInstance; public static FinalizeLogSegmentRequestProto getDefaultInstance() { return defaultInstance; } public FinalizeLogSegmentRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FinalizeLogSegmentRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = reqInfo_.toBuilder(); } reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(reqInfo_); reqInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; startTxId_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; endTxId_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public FinalizeLogSegmentRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { return new FinalizeLogSegmentRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; public static final int REQINFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { return reqInfo_; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { return reqInfo_; } // required uint64 startTxId = 2; public static final int STARTTXID_FIELD_NUMBER = 2; private long startTxId_; /** * required uint64 startTxId = 2; */ public boolean hasStartTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 startTxId = 2; */ public long getStartTxId() { return startTxId_; } // required uint64 endTxId = 3; public static final int ENDTXID_FIELD_NUMBER = 3; private long endTxId_; /** * required uint64 endTxId = 3; */ public boolean hasEndTxId() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 endTxId = 3; */ public long getEndTxId() { return endTxId_; } private void initFields() { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); startTxId_ = 0L; endTxId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasReqInfo()) { memoizedIsInitialized = 0; return false; } if (!hasStartTxId()) { memoizedIsInitialized = 0; return false; } if (!hasEndTxId()) { memoizedIsInitialized = 0; return false; } if (!getReqInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, reqInfo_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, startTxId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, endTxId_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, reqInfo_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, startTxId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, endTxId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } 
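  // Usage sketch (editorial note, not part of the generated file):
  // finalizing a segment names its inclusive transaction-id range via the
  // required startTxId and endTxId fields. reqInfo is assumed to be built
  // elsewhere; the txid bounds are illustrative placeholders only.
  //
  //   QJournalProtocolProtos.FinalizeLogSegmentRequestProto finalize =
  //       QJournalProtocolProtos.FinalizeLogSegmentRequestProto.newBuilder()
  //           .setReqInfo(reqInfo)  // required message field (tag 1)
  //           .setStartTxId(42L)    // required uint64: first txid in segment
  //           .setEndTxId(99L)      // required uint64: last txid in segment
  //           .build();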
@java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) obj; boolean result = true; result = result && (hasReqInfo() == other.hasReqInfo()); if (hasReqInfo()) { result = result && getReqInfo() .equals(other.getReqInfo()); } result = result && (hasStartTxId() == other.hasStartTxId()); if (hasStartTxId()) { result = result && (getStartTxId() == other.getStartTxId()); } result = result && (hasEndTxId() == other.hasEndTxId()); if (hasEndTxId()) { result = result && (getEndTxId() == other.getEndTxId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasReqInfo()) { hash = (37 * hash) + REQINFO_FIELD_NUMBER; hash = (53 * hash) + getReqInfo().hashCode(); } if (hasStartTxId()) { hash = (37 * hash) + STARTTXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getStartTxId()); } if (hasEndTxId()) { hash = (37 * hash) + ENDTXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getEndTxId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.FinalizeLogSegmentRequestProto} * *
     * <pre>
     **
     * finalizeLogSegment()
     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getReqInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); startTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); endTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (reqInfoBuilder_ == null) { result.reqInfo_ = reqInfo_; } else { result.reqInfo_ = reqInfoBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.startTxId_ = startTxId_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.endTxId_ = 
endTxId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance()) return this; if (other.hasReqInfo()) { mergeReqInfo(other.getReqInfo()); } if (other.hasStartTxId()) { setStartTxId(other.getStartTxId()); } if (other.hasEndTxId()) { setEndTxId(other.getEndTxId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasReqInfo()) { return false; } if (!hasStartTxId()) { return false; } if (!hasEndTxId()) { return false; } if (!getReqInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { if (reqInfoBuilder_ == null) { return reqInfo_; } else { return reqInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } reqInfo_ = value; onChanged(); } else { reqInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) { if (reqInfoBuilder_ == null) { reqInfo_ = builderForValue.build(); onChanged(); } else { 
reqInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial(); } else { reqInfo_ = value; } onChanged(); } else { reqInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder clearReqInfo() { if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); onChanged(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getReqInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { if (reqInfoBuilder_ != null) { return reqInfoBuilder_.getMessageOrBuilder(); } else { return reqInfo_; } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> getReqInfoFieldBuilder() { if (reqInfoBuilder_ == null) { reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>( reqInfo_, getParentForChildren(), isClean()); reqInfo_ = null; } return reqInfoBuilder_; } // required uint64 startTxId = 2; private long startTxId_ ; /** * required uint64 startTxId = 2; */ public boolean hasStartTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 startTxId = 2; */ public long getStartTxId() { return startTxId_; } /** * required uint64 startTxId = 2; */ public Builder setStartTxId(long value) { bitField0_ |= 0x00000002; startTxId_ = value; onChanged(); return this; } /** * required uint64 startTxId = 2; */ public Builder clearStartTxId() { bitField0_ = (bitField0_ & ~0x00000002); startTxId_ = 0L; onChanged(); return this; } // required uint64 endTxId = 3; private long endTxId_ ; /** * required uint64 endTxId = 3; */ public boolean hasEndTxId() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 endTxId = 3; */ public long getEndTxId() { return endTxId_; } /** * required uint64 endTxId = 3; */ public Builder setEndTxId(long value) { bitField0_ |= 0x00000004; endTxId_ = value; onChanged(); return this; } /** * required 
uint64 endTxId = 3; */ public Builder clearEndTxId() { bitField0_ = (bitField0_ & ~0x00000004); endTxId_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.FinalizeLogSegmentRequestProto) } static { defaultInstance = new FinalizeLogSegmentRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.FinalizeLogSegmentRequestProto) } public interface FinalizeLogSegmentResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.qjournal.FinalizeLogSegmentResponseProto} */ public static final class FinalizeLogSegmentResponseProto extends com.google.protobuf.GeneratedMessage implements FinalizeLogSegmentResponseProtoOrBuilder { // Use FinalizeLogSegmentResponseProto.newBuilder() to construct. private FinalizeLogSegmentResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private FinalizeLogSegmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final FinalizeLogSegmentResponseProto defaultInstance; public static FinalizeLogSegmentResponseProto getDefaultInstance() { return defaultInstance; } public FinalizeLogSegmentResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FinalizeLogSegmentResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public FinalizeLogSegmentResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new 
FinalizeLogSegmentResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.FinalizeLogSegmentResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_descriptor; } public 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.FinalizeLogSegmentResponseProto) } static { defaultInstance = new FinalizeLogSegmentResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.FinalizeLogSegmentResponseProto) } public interface PurgeLogsRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ boolean hasReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder(); // required uint64 minTxIdToKeep = 2; /** * required uint64 minTxIdToKeep = 2; */ boolean hasMinTxIdToKeep(); /** * required uint64 minTxIdToKeep = 2; */ long getMinTxIdToKeep(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.PurgeLogsRequestProto} * *
   * <pre>
   **
   * purgeLogs()
   * </pre>
*/ public static final class PurgeLogsRequestProto extends com.google.protobuf.GeneratedMessage implements PurgeLogsRequestProtoOrBuilder { // Use PurgeLogsRequestProto.newBuilder() to construct. private PurgeLogsRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private PurgeLogsRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final PurgeLogsRequestProto defaultInstance; public static PurgeLogsRequestProto getDefaultInstance() { return defaultInstance; } public PurgeLogsRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private PurgeLogsRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = reqInfo_.toBuilder(); } reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(reqInfo_); reqInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; minTxIdToKeep_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public PurgeLogsRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new PurgeLogsRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required 
.hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; public static final int REQINFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { return reqInfo_; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { return reqInfo_; } // required uint64 minTxIdToKeep = 2; public static final int MINTXIDTOKEEP_FIELD_NUMBER = 2; private long minTxIdToKeep_; /** * required uint64 minTxIdToKeep = 2; */ public boolean hasMinTxIdToKeep() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 minTxIdToKeep = 2; */ public long getMinTxIdToKeep() { return minTxIdToKeep_; } private void initFields() { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); minTxIdToKeep_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasReqInfo()) { memoizedIsInitialized = 0; return false; } if (!hasMinTxIdToKeep()) { memoizedIsInitialized = 0; return false; } if (!getReqInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, reqInfo_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, minTxIdToKeep_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, reqInfo_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, minTxIdToKeep_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) obj; boolean result = true; result = result && (hasReqInfo() == other.hasReqInfo()); if (hasReqInfo()) { result = result && getReqInfo() .equals(other.getReqInfo()); } result = result && (hasMinTxIdToKeep() == other.hasMinTxIdToKeep()); if (hasMinTxIdToKeep()) { result = result && (getMinTxIdToKeep() == other.getMinTxIdToKeep()); } result = result && 
getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasReqInfo()) { hash = (37 * hash) + REQINFO_FIELD_NUMBER; hash = (53 * hash) + getReqInfo().hashCode(); } if (hasMinTxIdToKeep()) { hash = (37 * hash) + MINTXIDTOKEEP_FIELD_NUMBER; hash = (53 * hash) + hashLong(getMinTxIdToKeep()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } 
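  // Illustrative sketch, not part of the generated code: how a caller would
  // typically construct and round-trip this message. setReqInfo() and
  // setMinTxIdToKeep() come from the Builder below; the RequestInfoProto and
  // JournalIdProto builder setters used here (setJournalId, setEpoch,
  // setIpcSerialNumber, setIdentifier) are assumed to match the builders
  // generated elsewhere in this file.
  private static PurgeLogsRequestProto examplePurgeRequest()
      throws com.google.protobuf.InvalidProtocolBufferException {
    PurgeLogsRequestProto req = newBuilder()
        .setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder()
            .setJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder()
                .setIdentifier("example-journal")) // hypothetical journal id
            .setEpoch(1L)                          // writer epoch
            .setIpcSerialNumber(0L))               // per-epoch IPC sequence number
        .setMinTxIdToKeep(1000L)                   // txids below this may be purged
        .build();
    // Round-trip through the wire format; parseFrom re-validates required fields.
    return parseFrom(req.toByteString());
  }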
@java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.PurgeLogsRequestProto} * *
     * <pre>
     **
     * purgeLogs()
     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getReqInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); minTxIdToKeep_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (reqInfoBuilder_ == null) { result.reqInfo_ = reqInfo_; } else { result.reqInfo_ = reqInfoBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.minTxIdToKeep_ = minTxIdToKeep_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) { return 
mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance()) return this; if (other.hasReqInfo()) { mergeReqInfo(other.getReqInfo()); } if (other.hasMinTxIdToKeep()) { setMinTxIdToKeep(other.getMinTxIdToKeep()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasReqInfo()) { return false; } if (!hasMinTxIdToKeep()) { return false; } if (!getReqInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { if (reqInfoBuilder_ == null) { return reqInfo_; } else { return reqInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } reqInfo_ = value; onChanged(); } else { reqInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) { if (reqInfoBuilder_ == null) { reqInfo_ = builderForValue.build(); onChanged(); } else { reqInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && reqInfo_ != 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial(); } else { reqInfo_ = value; } onChanged(); } else { reqInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder clearReqInfo() { if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); onChanged(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getReqInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { if (reqInfoBuilder_ != null) { return reqInfoBuilder_.getMessageOrBuilder(); } else { return reqInfo_; } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> getReqInfoFieldBuilder() { if (reqInfoBuilder_ == null) { reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>( reqInfo_, getParentForChildren(), isClean()); reqInfo_ = null; } return reqInfoBuilder_; } // required uint64 minTxIdToKeep = 2; private long minTxIdToKeep_ ; /** * required uint64 minTxIdToKeep = 2; */ public boolean hasMinTxIdToKeep() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 minTxIdToKeep = 2; */ public long getMinTxIdToKeep() { return minTxIdToKeep_; } /** * required uint64 minTxIdToKeep = 2; */ public Builder setMinTxIdToKeep(long value) { bitField0_ |= 0x00000002; minTxIdToKeep_ = value; onChanged(); return this; } /** * required uint64 minTxIdToKeep = 2; */ public Builder clearMinTxIdToKeep() { bitField0_ = (bitField0_ & ~0x00000002); minTxIdToKeep_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.PurgeLogsRequestProto) } static { defaultInstance = new PurgeLogsRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.PurgeLogsRequestProto) } public interface PurgeLogsResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.qjournal.PurgeLogsResponseProto} */ public static final class PurgeLogsResponseProto extends com.google.protobuf.GeneratedMessage implements PurgeLogsResponseProtoOrBuilder { // Use PurgeLogsResponseProto.newBuilder() to construct. 
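  // Illustrative note, not part of the generated code: this response message
  // declares no fields, so it is effectively a void acknowledgement of
  // purgeLogs(). Assuming standard generated-message semantics, a server
  // reply is just the default instance, which serializes to zero bytes:
  //
  //   PurgeLogsResponseProto ack = PurgeLogsResponseProto.getDefaultInstance();
  //   assert ack.getSerializedSize() == 0;  // only unknown fields could add bytes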
private PurgeLogsResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private PurgeLogsResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final PurgeLogsResponseProto defaultInstance; public static PurgeLogsResponseProto getDefaultInstance() { return defaultInstance; } public PurgeLogsResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private PurgeLogsResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public PurgeLogsResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new PurgeLogsResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return 
super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder 
newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.PurgeLogsResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.PurgeLogsResponseProto) } static { defaultInstance = new PurgeLogsResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.PurgeLogsResponseProto) } public interface IsFormattedRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ boolean hasJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.IsFormattedRequestProto} * *
   * <pre>
   **
   * isFormatted()
   * </pre>
*/ public static final class IsFormattedRequestProto extends com.google.protobuf.GeneratedMessage implements IsFormattedRequestProtoOrBuilder { // Use IsFormattedRequestProto.newBuilder() to construct. private IsFormattedRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private IsFormattedRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final IsFormattedRequestProto defaultInstance; public static IsFormattedRequestProto getDefaultInstance() { return defaultInstance; } public IsFormattedRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private IsFormattedRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = jid_.toBuilder(); } jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(jid_); jid_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public IsFormattedRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new IsFormattedRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; public static final int 
JID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { return jid_; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { return jid_; } private void initFields() { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasJid()) { memoizedIsInitialized = 0; return false; } if (!getJid().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, jid_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, jid_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) obj; boolean result = true; result = result && (hasJid() == other.hasJid()); if (hasJid()) { result = result && getJid() .equals(other.getJid()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasJid()) { hash = (37 * hash) + JID_FIELD_NUMBER; hash = (53 * hash) + getJid().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.IsFormattedRequestProto} * *
     * <pre>
     **
     * isFormatted()
     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getJidFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (jidBuilder_ == null) { result.jid_ = jid_; } else { result.jid_ = jidBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance()) return this; if (other.hasJid()) { mergeJid(other.getJid()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasJid()) { return false; } if (!getJid().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { if (jidBuilder_ == null) { return jid_; } else { return jidBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder 
clearJid() { if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { if (jidBuilder_ != null) { return jidBuilder_.getMessageOrBuilder(); } else { return jid_; } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> getJidFieldBuilder() { if (jidBuilder_ == null) { jidBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>( jid_, getParentForChildren(), isClean()); jid_ = null; } return jidBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.IsFormattedRequestProto) } static { defaultInstance = new IsFormattedRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.IsFormattedRequestProto) } public interface IsFormattedResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required bool isFormatted = 1; /** * required bool isFormatted = 1; */ boolean hasIsFormatted(); /** * required bool isFormatted = 1; */ boolean getIsFormatted(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.IsFormattedResponseProto} */ public static final class IsFormattedResponseProto extends com.google.protobuf.GeneratedMessage implements IsFormattedResponseProtoOrBuilder { // Use IsFormattedResponseProto.newBuilder() to construct. 
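  // Illustrative sketch, not part of the generated code: a client inspecting
  // whether a JournalNode reports its journal storage as formatted. The
  // replyBytes variable is hypothetical; hasIsFormatted() and getIsFormatted()
  // are the accessors generated below, and parseFrom() is defined above.
  //
  //   IsFormattedResponseProto resp = IsFormattedResponseProto.parseFrom(replyBytes);
  //   boolean formatted = resp.hasIsFormatted() && resp.getIsFormatted();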
private IsFormattedResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private IsFormattedResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final IsFormattedResponseProto defaultInstance; public static IsFormattedResponseProto getDefaultInstance() { return defaultInstance; } public IsFormattedResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private IsFormattedResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; isFormatted_ = input.readBool(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public IsFormattedResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new IsFormattedResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required bool isFormatted = 1; public static final int ISFORMATTED_FIELD_NUMBER = 1; private boolean isFormatted_; /** * required bool isFormatted = 1; */ public boolean hasIsFormatted() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool isFormatted = 1; */ public boolean getIsFormatted() { return isFormatted_; } private void initFields() { isFormatted_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasIsFormatted()) { memoizedIsInitialized = 0; return false; } 
memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, isFormatted_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(1, isFormatted_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) obj; boolean result = true; result = result && (hasIsFormatted() == other.hasIsFormatted()); if (hasIsFormatted()) { result = result && (getIsFormatted() == other.getIsFormatted()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasIsFormatted()) { hash = (37 * hash) + ISFORMATTED_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getIsFormatted()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public 
static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.IsFormattedResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); isFormatted_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_descriptor; } public 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.isFormatted_ = isFormatted_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()) return this; if (other.hasIsFormatted()) { setIsFormatted(other.getIsFormatted()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasIsFormatted()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool isFormatted = 1; private boolean isFormatted_ ; /** * required bool isFormatted = 1; */ public boolean hasIsFormatted() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool isFormatted = 1; */ public boolean getIsFormatted() { return isFormatted_; } /** * required bool isFormatted = 1; */ public Builder setIsFormatted(boolean value) { bitField0_ |= 0x00000001; isFormatted_ = value; onChanged(); return this; } /** * required bool isFormatted = 1; */ public Builder clearIsFormatted() { bitField0_ = (bitField0_ & ~0x00000001); isFormatted_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.IsFormattedResponseProto) } static { defaultInstance = new IsFormattedResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.IsFormattedResponseProto) } public interface DiscardSegmentsRequestProtoOrBuilder 
extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ boolean hasJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder(); // required uint64 startTxId = 2; /** * required uint64 startTxId = 2; */ boolean hasStartTxId(); /** * required uint64 startTxId = 2; */ long getStartTxId(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.DiscardSegmentsRequestProto} * *
   * <pre>
   **
   * discardSegments()
   * </pre>
*/ public static final class DiscardSegmentsRequestProto extends com.google.protobuf.GeneratedMessage implements DiscardSegmentsRequestProtoOrBuilder { // Use DiscardSegmentsRequestProto.newBuilder() to construct. private DiscardSegmentsRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DiscardSegmentsRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DiscardSegmentsRequestProto defaultInstance; public static DiscardSegmentsRequestProto getDefaultInstance() { return defaultInstance; } public DiscardSegmentsRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DiscardSegmentsRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = jid_.toBuilder(); } jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(jid_); jid_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; startTxId_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DiscardSegmentsRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DiscardSegmentsRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser 
getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; public static final int JID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { return jid_; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { return jid_; } // required uint64 startTxId = 2; public static final int STARTTXID_FIELD_NUMBER = 2; private long startTxId_; /** * required uint64 startTxId = 2; */ public boolean hasStartTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 startTxId = 2; */ public long getStartTxId() { return startTxId_; } private void initFields() { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); startTxId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasJid()) { memoizedIsInitialized = 0; return false; } if (!hasStartTxId()) { memoizedIsInitialized = 0; return false; } if (!getJid().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, startTxId_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, startTxId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto) obj; boolean result = true; result = result && (hasJid() == other.hasJid()); if (hasJid()) { result = result && getJid() .equals(other.getJid()); } result = result && (hasStartTxId() == other.hasStartTxId()); if (hasStartTxId()) { result = result && (getStartTxId() == other.getStartTxId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; 
@java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasJid()) { hash = (37 * hash) + JID_FIELD_NUMBER; hash = (53 * hash) + getJid().hashCode(); } if (hasStartTxId()) { hash = (37 * hash) + STARTTXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getStartTxId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.DiscardSegmentsRequestProto} * *
     * <pre>
     **
     * discardSegments()
     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getJidFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); startTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (jidBuilder_ == null) { result.jid_ = jid_; } else { result.jid_ = jidBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.startTxId_ = startTxId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto) { 
return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.getDefaultInstance()) return this; if (other.hasJid()) { mergeJid(other.getJid()); } if (other.hasStartTxId()) { setStartTxId(other.getStartTxId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasJid()) { return false; } if (!hasStartTxId()) { return false; } if (!getJid().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { if (jidBuilder_ == null) { return jid_; } else { return jidBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder clearJid() { if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { if (jidBuilder_ != null) { return jidBuilder_.getMessageOrBuilder(); } else { return jid_; } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> getJidFieldBuilder() { if (jidBuilder_ == null) { jidBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>( jid_, getParentForChildren(), isClean()); jid_ = null; } return jidBuilder_; } // required uint64 startTxId = 2; private long startTxId_ ; /** * required uint64 startTxId = 2; */ public boolean hasStartTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 startTxId = 2; */ public long getStartTxId() { return startTxId_; } /** * required uint64 startTxId = 2; */ public Builder setStartTxId(long value) { bitField0_ |= 0x00000002; startTxId_ = value; onChanged(); return this; } /** * required uint64 startTxId = 2; */ public Builder clearStartTxId() { bitField0_ = (bitField0_ & ~0x00000002); startTxId_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DiscardSegmentsRequestProto) } static { defaultInstance = new DiscardSegmentsRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DiscardSegmentsRequestProto) } public interface DiscardSegmentsResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.qjournal.DiscardSegmentsResponseProto} */ public static final class DiscardSegmentsResponseProto extends com.google.protobuf.GeneratedMessage implements DiscardSegmentsResponseProtoOrBuilder { // Use DiscardSegmentsResponseProto.newBuilder() to construct. 
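  // Usage sketch (illustrative, not part of the generated source): assembling
  // the DiscardSegmentsRequestProto defined above. The journal id "myjournal"
  // and the transaction id are example values, not taken from this file.
  //
  //   JournalIdProto jid = JournalIdProto.newBuilder()
  //       .setIdentifier("myjournal")    // required string identifier = 1
  //       .build();
  //   DiscardSegmentsRequestProto req = DiscardSegmentsRequestProto.newBuilder()
  //       .setJid(jid)                   // required .JournalIdProto jid = 1
  //       .setStartTxId(1000L)           // required uint64 startTxId = 2
  //       .build();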
private DiscardSegmentsResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DiscardSegmentsResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DiscardSegmentsResponseProto defaultInstance; public static DiscardSegmentsResponseProto getDefaultInstance() { return defaultInstance; } public DiscardSegmentsResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DiscardSegmentsResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DiscardSegmentsResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DiscardSegmentsResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected 
java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.DiscardSegmentsResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DiscardSegmentsResponseProto) } static { defaultInstance = new DiscardSegmentsResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DiscardSegmentsResponseProto) } public interface GetJournalCTimeRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ boolean hasJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalCTimeRequestProto} * *
   * <pre>
   **
   * getJournalCTime()
   * </pre>
*/ public static final class GetJournalCTimeRequestProto extends com.google.protobuf.GeneratedMessage implements GetJournalCTimeRequestProtoOrBuilder { // Use GetJournalCTimeRequestProto.newBuilder() to construct. private GetJournalCTimeRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetJournalCTimeRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetJournalCTimeRequestProto defaultInstance; public static GetJournalCTimeRequestProto getDefaultInstance() { return defaultInstance; } public GetJournalCTimeRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetJournalCTimeRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = jid_.toBuilder(); } jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(jid_); jid_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public GetJournalCTimeRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new GetJournalCTimeRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required 
.hadoop.hdfs.qjournal.JournalIdProto jid = 1; public static final int JID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { return jid_; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { return jid_; } private void initFields() { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasJid()) { memoizedIsInitialized = 0; return false; } if (!getJid().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, jid_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, jid_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto) obj; boolean result = true; result = result && (hasJid() == other.hasJid()); if (hasJid()) { result = result && getJid() .equals(other.getJid()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasJid()) { hash = (37 * hash) + JID_FIELD_NUMBER; hash = (53 * hash) + getJid().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalCTimeRequestProto} * *
     * <pre>
     **
     * getJournalCTime()
     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getJidFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (jidBuilder_ == null) { result.jid_ = jid_; } else { result.jid_ = jidBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto)other); } else { super.mergeFrom(other); return this; } } public 
Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.getDefaultInstance()) return this; if (other.hasJid()) { mergeJid(other.getJid()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasJid()) { return false; } if (!getJid().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { if (jidBuilder_ == null) { return jid_; } else { return jidBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ 
public Builder clearJid() { if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { if (jidBuilder_ != null) { return jidBuilder_.getMessageOrBuilder(); } else { return jid_; } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> getJidFieldBuilder() { if (jidBuilder_ == null) { jidBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>( jid_, getParentForChildren(), isClean()); jid_ = null; } return jidBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetJournalCTimeRequestProto) } static { defaultInstance = new GetJournalCTimeRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetJournalCTimeRequestProto) } public interface GetJournalCTimeResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required int64 resultCTime = 1; /** * required int64 resultCTime = 1; */ boolean hasResultCTime(); /** * required int64 resultCTime = 1; */ long getResultCTime(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalCTimeResponseProto} */ public static final class GetJournalCTimeResponseProto extends com.google.protobuf.GeneratedMessage implements GetJournalCTimeResponseProtoOrBuilder { // Use GetJournalCTimeResponseProto.newBuilder() to construct. 
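    // A minimal usage sketch (illustrative, not part of the generated source):
    // a caller builds the request around a JournalIdProto and reads the ctime
    // back from the matching response. The journal id "edits" is hypothetical.
    //
    //   JournalIdProto jid = JournalIdProto.newBuilder()
    //       .setIdentifier("edits")              // required string identifier = 1
    //       .build();
    //   GetJournalCTimeRequestProto request = GetJournalCTimeRequestProto.newBuilder()
    //       .setJid(jid)                         // required JournalIdProto jid = 1
    //       .build();                            // build() throws if a required field is unset
    //   // ... after the RPC returns a GetJournalCTimeResponseProto named response:
    //   long cTime = response.getResultCTime();  // required int64 resultCTime = 1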
private GetJournalCTimeResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetJournalCTimeResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetJournalCTimeResponseProto defaultInstance; public static GetJournalCTimeResponseProto getDefaultInstance() { return defaultInstance; } public GetJournalCTimeResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetJournalCTimeResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; resultCTime_ = input.readInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public GetJournalCTimeResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new GetJournalCTimeResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required int64 resultCTime = 1; public static final int RESULTCTIME_FIELD_NUMBER = 1; private long resultCTime_; /** * required int64 resultCTime = 1; */ public boolean hasResultCTime() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 resultCTime = 1; */ public long getResultCTime() { return resultCTime_; } private void initFields() { resultCTime_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasResultCTime()) { 
memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, resultCTime_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(1, resultCTime_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto) obj; boolean result = true; result = result && (hasResultCTime() == other.hasResultCTime()); if (hasResultCTime()) { result = result && (getResultCTime() == other.getResultCTime()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasResultCTime()) { hash = (37 * hash) + RESULTCTIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getResultCTime()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalCTimeResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); resultCTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.resultCTime_ = resultCTime_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance()) return this; if (other.hasResultCTime()) { setResultCTime(other.getResultCTime()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasResultCTime()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required int64 resultCTime = 1; private long resultCTime_ ; /** * required int64 resultCTime = 1; */ public boolean hasResultCTime() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 resultCTime = 1; */ public long getResultCTime() { return resultCTime_; } /** * required int64 resultCTime = 1; */ public Builder setResultCTime(long value) { bitField0_ |= 0x00000001; resultCTime_ = value; onChanged(); return this; } /** * required int64 resultCTime = 1; */ public Builder clearResultCTime() { bitField0_ = (bitField0_ & ~0x00000001); resultCTime_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetJournalCTimeResponseProto) } static { defaultInstance = new 
GetJournalCTimeResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetJournalCTimeResponseProto) } public interface DoPreUpgradeRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ boolean hasJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoPreUpgradeRequestProto} * *
   * <pre>
   **
   * doPreUpgrade()
   * </pre>
*/ public static final class DoPreUpgradeRequestProto extends com.google.protobuf.GeneratedMessage implements DoPreUpgradeRequestProtoOrBuilder { // Use DoPreUpgradeRequestProto.newBuilder() to construct. private DoPreUpgradeRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DoPreUpgradeRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DoPreUpgradeRequestProto defaultInstance; public static DoPreUpgradeRequestProto getDefaultInstance() { return defaultInstance; } public DoPreUpgradeRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DoPreUpgradeRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = jid_.toBuilder(); } jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(jid_); jid_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DoPreUpgradeRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DoPreUpgradeRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; public 
static final int JID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { return jid_; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { return jid_; } private void initFields() { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasJid()) { memoizedIsInitialized = 0; return false; } if (!getJid().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, jid_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, jid_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto) obj; boolean result = true; result = result && (hasJid() == other.hasJid()); if (hasJid()) { result = result && getJid() .equals(other.getJid()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasJid()) { hash = (37 * hash) + JID_FIELD_NUMBER; hash = (53 * hash) + getJid().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, 
extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoPreUpgradeRequestProto} * *
     * <pre>
     **
     * doPreUpgrade()
     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getJidFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (jidBuilder_ == null) { result.jid_ = jid_; } else { result.jid_ = jidBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.getDefaultInstance()) return this; if (other.hasJid()) { mergeJid(other.getJid()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasJid()) { return false; } if (!getJid().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { if (jidBuilder_ == null) { return jid_; } else { return jidBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder 
clearJid() { if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { if (jidBuilder_ != null) { return jidBuilder_.getMessageOrBuilder(); } else { return jid_; } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> getJidFieldBuilder() { if (jidBuilder_ == null) { jidBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>( jid_, getParentForChildren(), isClean()); jid_ = null; } return jidBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoPreUpgradeRequestProto) } static { defaultInstance = new DoPreUpgradeRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoPreUpgradeRequestProto) } public interface DoPreUpgradeResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoPreUpgradeResponseProto} */ public static final class DoPreUpgradeResponseProto extends com.google.protobuf.GeneratedMessage implements DoPreUpgradeResponseProtoOrBuilder { // Use DoPreUpgradeResponseProto.newBuilder() to construct. 
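    // A round-trip sketch (illustrative only): like every message in this file,
    // DoPreUpgradeRequestProto serializes to bytes and re-parses via the static
    // parseFrom overloads; equals() above is value-based, so the copy compares equal.
    //
    //   DoPreUpgradeRequestProto req = DoPreUpgradeRequestProto.newBuilder()
    //       .setJid(JournalIdProto.newBuilder().setIdentifier("edits")) // builder overload of setJid
    //       .build();
    //   byte[] wire = req.toByteArray();
    //   DoPreUpgradeRequestProto parsed = DoPreUpgradeRequestProto.parseFrom(wire);
    //   assert parsed.equals(req) && parsed.getJid().getIdentifier().equals("edits");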
private DoPreUpgradeResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DoPreUpgradeResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DoPreUpgradeResponseProto defaultInstance; public static DoPreUpgradeResponseProto getDefaultInstance() { return defaultInstance; } public DoPreUpgradeResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DoPreUpgradeResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DoPreUpgradeResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DoPreUpgradeResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws 
java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public 
static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoPreUpgradeResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto) { return 
mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoPreUpgradeResponseProto) } static { defaultInstance = new DoPreUpgradeResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoPreUpgradeResponseProto) } public interface DoUpgradeRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ boolean hasJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder(); // required .hadoop.hdfs.StorageInfoProto sInfo = 2; /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ boolean hasSInfo(); /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getSInfo(); /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getSInfoOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoUpgradeRequestProto} * *
   * <pre>
   **
   * doUpgrade()
   * </pre>
*/ public static final class DoUpgradeRequestProto extends com.google.protobuf.GeneratedMessage implements DoUpgradeRequestProtoOrBuilder { // Use DoUpgradeRequestProto.newBuilder() to construct. private DoUpgradeRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DoUpgradeRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DoUpgradeRequestProto defaultInstance; public static DoUpgradeRequestProto getDefaultInstance() { return defaultInstance; } public DoUpgradeRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DoUpgradeRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = jid_.toBuilder(); } jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(jid_); jid_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = sInfo_.toBuilder(); } sInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(sInfo_); sInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DoUpgradeRequestProto parsePartialFrom( 
com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DoUpgradeRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; public static final int JID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { return jid_; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { return jid_; } // required .hadoop.hdfs.StorageInfoProto sInfo = 2; public static final int SINFO_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto sInfo_; /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ public boolean hasSInfo() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getSInfo() { return sInfo_; } /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getSInfoOrBuilder() { return sInfo_; } private void initFields() { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); sInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasJid()) { memoizedIsInitialized = 0; return false; } if (!hasSInfo()) { memoizedIsInitialized = 0; return false; } if (!getJid().isInitialized()) { memoizedIsInitialized = 0; return false; } if (!getSInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, sInfo_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, sInfo_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == 
this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto) obj; boolean result = true; result = result && (hasJid() == other.hasJid()); if (hasJid()) { result = result && getJid() .equals(other.getJid()); } result = result && (hasSInfo() == other.hasSInfo()); if (hasSInfo()) { result = result && getSInfo() .equals(other.getSInfo()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasJid()) { hash = (37 * hash) + JID_FIELD_NUMBER; hash = (53 * hash) + getJid().hashCode(); } if (hasSInfo()) { hash = (37 * hash) + SINFO_FIELD_NUMBER; hash = (53 * hash) + getSInfo().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoUpgradeRequestProto} * *
     * <pre>
     **
     * doUpgrade()
     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getJidFieldBuilder(); getSInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); if (sInfoBuilder_ == null) { sInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); } else { sInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (jidBuilder_ == null) { result.jid_ = jid_; } else { result.jid_ = jidBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } if (sInfoBuilder_ == null) { result.sInfo_ = sInfo_; } else { result.sInfo_ = sInfoBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder 
mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.getDefaultInstance()) return this; if (other.hasJid()) { mergeJid(other.getJid()); } if (other.hasSInfo()) { mergeSInfo(other.getSInfo()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasJid()) { return false; } if (!hasSInfo()) { return false; } if (!getJid().isInitialized()) { return false; } if (!getSInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { if (jidBuilder_ == null) { return jid_; } else { return jidBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 
0x00000001) && jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder clearJid() { if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { if (jidBuilder_ != null) { return jidBuilder_.getMessageOrBuilder(); } else { return jid_; } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> getJidFieldBuilder() { if (jidBuilder_ == null) { jidBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>( jid_, getParentForChildren(), isClean()); jid_ = null; } return jidBuilder_; } // required .hadoop.hdfs.StorageInfoProto sInfo = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto sInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder> sInfoBuilder_; /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ public boolean hasSInfo() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getSInfo() { if (sInfoBuilder_ == null) { return sInfo_; } else { return sInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ public Builder setSInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) { if (sInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } sInfo_ = value; onChanged(); } else { sInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ public Builder setSInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder builderForValue) { if (sInfoBuilder_ == null) { 
sInfo_ = builderForValue.build(); onChanged(); } else { sInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ public Builder mergeSInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) { if (sInfoBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && sInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance()) { sInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.newBuilder(sInfo_).mergeFrom(value).buildPartial(); } else { sInfo_ = value; } onChanged(); } else { sInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ public Builder clearSInfo() { if (sInfoBuilder_ == null) { sInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); onChanged(); } else { sInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder getSInfoBuilder() { bitField0_ |= 0x00000002; onChanged(); return getSInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getSInfoOrBuilder() { if (sInfoBuilder_ != null) { return sInfoBuilder_.getMessageOrBuilder(); } else { return sInfo_; } } /** * required .hadoop.hdfs.StorageInfoProto sInfo = 2; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder> getSInfoFieldBuilder() { if (sInfoBuilder_ == null) { sInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder>( sInfo_, getParentForChildren(), isClean()); sInfo_ = null; } return sInfoBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoUpgradeRequestProto) } static { defaultInstance = new DoUpgradeRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoUpgradeRequestProto) } public interface DoUpgradeResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoUpgradeResponseProto} */ public static final class DoUpgradeResponseProto extends com.google.protobuf.GeneratedMessage implements DoUpgradeResponseProtoOrBuilder { // Use DoUpgradeResponseProto.newBuilder() to construct. 
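  // Usage sketch (illustrative; not part of the generated file): building and
  // serializing the doUpgrade() request defined above. Both fields are
  // required, so build() throws if either is unset. The journal identifier
  // "journal-1" and the pre-populated StorageInfoProto variable sInfo are
  // assumptions for the example, not values taken from this file.
  //
  //   QJournalProtocolProtos.JournalIdProto jid =
  //       QJournalProtocolProtos.JournalIdProto.newBuilder()
  //           .setIdentifier("journal-1")    // assumed journal name
  //           .build();
  //   QJournalProtocolProtos.DoUpgradeRequestProto request =
  //       QJournalProtocolProtos.DoUpgradeRequestProto.newBuilder()
  //           .setJid(jid)
  //           .setSInfo(sInfo)               // sInfo: populated elsewhere
  //           .build();
  //   byte[] wire = request.toByteArray();   // standard protobuf runtime call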
private DoUpgradeResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DoUpgradeResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DoUpgradeResponseProto defaultInstance; public static DoUpgradeResponseProto getDefaultInstance() { return defaultInstance; } public DoUpgradeResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DoUpgradeResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DoUpgradeResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DoUpgradeResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return 
super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder 
newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoUpgradeResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoUpgradeResponseProto) } static { defaultInstance = new DoUpgradeResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoUpgradeResponseProto) } public interface DoFinalizeRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ boolean hasJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoFinalizeRequestProto} * *
   * <pre>
   **
   * doFinalize()
   * </pre>
*/ public static final class DoFinalizeRequestProto extends com.google.protobuf.GeneratedMessage implements DoFinalizeRequestProtoOrBuilder { // Use DoFinalizeRequestProto.newBuilder() to construct. private DoFinalizeRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DoFinalizeRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DoFinalizeRequestProto defaultInstance; public static DoFinalizeRequestProto getDefaultInstance() { return defaultInstance; } public DoFinalizeRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DoFinalizeRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = jid_.toBuilder(); } jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(jid_); jid_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DoFinalizeRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DoFinalizeRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; public static final int JID_FIELD_NUMBER 
= 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { return jid_; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { return jid_; } private void initFields() { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasJid()) { memoizedIsInitialized = 0; return false; } if (!getJid().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, jid_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, jid_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto) obj; boolean result = true; result = result && (hasJid() == other.hasJid()); if (hasJid()) { result = result && getJid() .equals(other.getJid()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasJid()) { hash = (37 * hash) + JID_FIELD_NUMBER; hash = (53 * hash) + getJid().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoFinalizeRequestProto} * *
     * <pre>
     **
     * doFinalize()
     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getJidFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (jidBuilder_ == null) { result.jid_ = jid_; } else { result.jid_ = jidBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.getDefaultInstance()) return this; if (other.hasJid()) { mergeJid(other.getJid()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasJid()) { return false; } if (!getJid().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { if (jidBuilder_ == null) { return jid_; } else { return jidBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder clearJid() { 
if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { if (jidBuilder_ != null) { return jidBuilder_.getMessageOrBuilder(); } else { return jid_; } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> getJidFieldBuilder() { if (jidBuilder_ == null) { jidBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>( jid_, getParentForChildren(), isClean()); jid_ = null; } return jidBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoFinalizeRequestProto) } static { defaultInstance = new DoFinalizeRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoFinalizeRequestProto) } public interface DoFinalizeResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoFinalizeResponseProto} */ public static final class DoFinalizeResponseProto extends com.google.protobuf.GeneratedMessage implements DoFinalizeResponseProtoOrBuilder { // Use DoFinalizeResponseProto.newBuilder() to construct. 
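  // Usage sketch (illustrative; not part of the generated file): a round-trip
  // of the doFinalize() request defined above. parseFrom(byte[]) throws
  // InvalidProtocolBufferException on malformed input or when the required
  // jid field is missing. The identifier "journal-1" is an assumed example
  // value.
  //
  //   QJournalProtocolProtos.DoFinalizeRequestProto request =
  //       QJournalProtocolProtos.DoFinalizeRequestProto.newBuilder()
  //           .setJid(QJournalProtocolProtos.JournalIdProto.newBuilder()
  //               .setIdentifier("journal-1"))
  //           .build();
  //   byte[] wire = request.toByteArray();
  //   QJournalProtocolProtos.DoFinalizeRequestProto parsed =
  //       QJournalProtocolProtos.DoFinalizeRequestProto.parseFrom(wire);
  //   assert "journal-1".equals(parsed.getJid().getIdentifier());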
private DoFinalizeResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DoFinalizeResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DoFinalizeResponseProto defaultInstance; public static DoFinalizeResponseProto getDefaultInstance() { return defaultInstance; } public DoFinalizeResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DoFinalizeResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DoFinalizeResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DoFinalizeResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { 
return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } 
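  // Usage sketch (illustrative; not part of the generated file): this
  // doFinalize() response declares no fields, so an empty payload parses to a
  // message equal to the default instance.
  //
  //   QJournalProtocolProtos.DoFinalizeResponseProto resp =
  //       QJournalProtocolProtos.DoFinalizeResponseProto.parseFrom(new byte[0]);
  //   assert resp.equals(
  //       QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance());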
public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoFinalizeResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoFinalizeResponseProto) } static { defaultInstance = new DoFinalizeResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoFinalizeResponseProto) } public interface CanRollBackRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ boolean hasJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder(); // required .hadoop.hdfs.StorageInfoProto storage = 2; /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ boolean hasStorage(); /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorage(); /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageOrBuilder(); // required .hadoop.hdfs.StorageInfoProto prevStorage = 3; /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ boolean hasPrevStorage(); /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getPrevStorage(); /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getPrevStorageOrBuilder(); // required int32 targetLayoutVersion = 4; /** * required int32 targetLayoutVersion = 4; */ boolean hasTargetLayoutVersion(); /** * required int32 targetLayoutVersion = 4; */ int getTargetLayoutVersion(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.CanRollBackRequestProto} * *
   * <pre>
   **
   * canRollBack()
   * </pre>
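   *
   * Editor's note (not part of the generated source): a minimal usage
   * sketch. "journal-1", the StorageInfoProto variables, and the layout
   * version below are illustrative assumptions, not values from this file.
   * <pre>
   * CanRollBackRequestProto req = CanRollBackRequestProto.newBuilder()
   *     .setJid(JournalIdProto.newBuilder().setIdentifier("journal-1"))
   *     .setStorage(currentStorage)        // assumed StorageInfoProto instance
   *     .setPrevStorage(previousStorage)   // assumed StorageInfoProto instance
   *     .setTargetLayoutVersion(-60)       // illustrative int32 value
   *     .build();  // throws if any of the four required fields is unset
   * </pre>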
*/ public static final class CanRollBackRequestProto extends com.google.protobuf.GeneratedMessage implements CanRollBackRequestProtoOrBuilder { // Use CanRollBackRequestProto.newBuilder() to construct. private CanRollBackRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CanRollBackRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CanRollBackRequestProto defaultInstance; public static CanRollBackRequestProto getDefaultInstance() { return defaultInstance; } public CanRollBackRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CanRollBackRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = jid_.toBuilder(); } jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(jid_); jid_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = storage_.toBuilder(); } storage_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(storage_); storage_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } case 26: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = prevStorage_.toBuilder(); } prevStorage_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(prevStorage_); prevStorage_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000004; break; } case 32: { bitField0_ |= 0x00000008; targetLayoutVersion_ = input.readInt32(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_descriptor; } protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public CanRollBackRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new CanRollBackRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; public static final int JID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { return jid_; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { return jid_; } // required .hadoop.hdfs.StorageInfoProto storage = 2; public static final int STORAGE_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto storage_; /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ public boolean hasStorage() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorage() { return storage_; } /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageOrBuilder() { return storage_; } // required .hadoop.hdfs.StorageInfoProto prevStorage = 3; public static final int PREVSTORAGE_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto prevStorage_; /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ public boolean hasPrevStorage() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getPrevStorage() { return prevStorage_; } /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getPrevStorageOrBuilder() { return prevStorage_; } // required int32 targetLayoutVersion = 4; public static final int TARGETLAYOUTVERSION_FIELD_NUMBER = 4; private int targetLayoutVersion_; /** * required int32 targetLayoutVersion = 4; */ public boolean hasTargetLayoutVersion() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required int32 targetLayoutVersion = 4; */ public int getTargetLayoutVersion() { return targetLayoutVersion_; } private void initFields() { 
jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); prevStorage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); targetLayoutVersion_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasJid()) { memoizedIsInitialized = 0; return false; } if (!hasStorage()) { memoizedIsInitialized = 0; return false; } if (!hasPrevStorage()) { memoizedIsInitialized = 0; return false; } if (!hasTargetLayoutVersion()) { memoizedIsInitialized = 0; return false; } if (!getJid().isInitialized()) { memoizedIsInitialized = 0; return false; } if (!getStorage().isInitialized()) { memoizedIsInitialized = 0; return false; } if (!getPrevStorage().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, storage_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeMessage(3, prevStorage_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeInt32(4, targetLayoutVersion_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, storage_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(3, prevStorage_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(4, targetLayoutVersion_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto) obj; boolean result = true; result = result && (hasJid() == other.hasJid()); if (hasJid()) { result = result && getJid() .equals(other.getJid()); } result = result && (hasStorage() == other.hasStorage()); if (hasStorage()) { result = result && getStorage() .equals(other.getStorage()); } result = result && (hasPrevStorage() == other.hasPrevStorage()); if (hasPrevStorage()) { result = result && getPrevStorage() .equals(other.getPrevStorage()); } result = result && (hasTargetLayoutVersion() == other.hasTargetLayoutVersion()); if (hasTargetLayoutVersion()) { result = result && 
(getTargetLayoutVersion() == other.getTargetLayoutVersion()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasJid()) { hash = (37 * hash) + JID_FIELD_NUMBER; hash = (53 * hash) + getJid().hashCode(); } if (hasStorage()) { hash = (37 * hash) + STORAGE_FIELD_NUMBER; hash = (53 * hash) + getStorage().hashCode(); } if (hasPrevStorage()) { hash = (37 * hash) + PREVSTORAGE_FIELD_NUMBER; hash = (53 * hash) + getPrevStorage().hashCode(); } if (hasTargetLayoutVersion()) { hash = (37 * hash) + TARGETLAYOUTVERSION_FIELD_NUMBER; hash = (53 * hash) + getTargetLayoutVersion(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder 
newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.CanRollBackRequestProto} * *
     * <pre>
     **
     * canRollBack()
     * </pre>
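     *
     * Editor's note (not part of the generated source): a merge-semantics
     * sketch; "base" and "overrides" are assumed existing instances.
     * <pre>
     * CanRollBackRequestProto merged = CanRollBackRequestProto.newBuilder(base)
     *     .mergeFrom(overrides)  // set scalar fields on overrides replace base's;
     *                            // message fields (jid, storage, ...) merge recursively
     *     .buildPartial();       // unlike build(), skips the required-field check
     * </pre>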
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getJidFieldBuilder(); getStorageFieldBuilder(); getPrevStorageFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); if (storageBuilder_ == null) { storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); } else { storageBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); if (prevStorageBuilder_ == null) { prevStorage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); } else { prevStorageBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); targetLayoutVersion_ = 0; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if 
(jidBuilder_ == null) { result.jid_ = jid_; } else { result.jid_ = jidBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } if (storageBuilder_ == null) { result.storage_ = storage_; } else { result.storage_ = storageBuilder_.build(); } if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } if (prevStorageBuilder_ == null) { result.prevStorage_ = prevStorage_; } else { result.prevStorage_ = prevStorageBuilder_.build(); } if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.targetLayoutVersion_ = targetLayoutVersion_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.getDefaultInstance()) return this; if (other.hasJid()) { mergeJid(other.getJid()); } if (other.hasStorage()) { mergeStorage(other.getStorage()); } if (other.hasPrevStorage()) { mergePrevStorage(other.getPrevStorage()); } if (other.hasTargetLayoutVersion()) { setTargetLayoutVersion(other.getTargetLayoutVersion()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasJid()) { return false; } if (!hasStorage()) { return false; } if (!hasPrevStorage()) { return false; } if (!hasTargetLayoutVersion()) { return false; } if (!getJid().isInitialized()) { return false; } if (!getStorage().isInitialized()) { return false; } if (!getPrevStorage().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() 
{ if (jidBuilder_ == null) { return jid_; } else { return jidBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder clearJid() { if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { if (jidBuilder_ != null) { return jidBuilder_.getMessageOrBuilder(); } else { return jid_; } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> getJidFieldBuilder() { if (jidBuilder_ == null) { jidBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>( jid_, getParentForChildren(), isClean()); jid_ = null; } return jidBuilder_; } // required .hadoop.hdfs.StorageInfoProto storage = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder> storageBuilder_; /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ public boolean hasStorage() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorage() { if (storageBuilder_ == null) { return storage_; } else { return storageBuilder_.getMessage(); } } /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) { if (storageBuilder_ == null) { if (value == null) { throw new NullPointerException(); } storage_ = value; onChanged(); } else { storageBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ public Builder setStorage( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder builderForValue) { if (storageBuilder_ == null) { storage_ = builderForValue.build(); onChanged(); } else { storageBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) { if (storageBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && storage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance()) { storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.newBuilder(storage_).mergeFrom(value).buildPartial(); } else { storage_ = value; } onChanged(); } else { storageBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ public Builder clearStorage() { if (storageBuilder_ == null) { storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); onChanged(); } else { storageBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder getStorageBuilder() { bitField0_ |= 0x00000002; onChanged(); return getStorageFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageOrBuilder() { if (storageBuilder_ != null) { return storageBuilder_.getMessageOrBuilder(); } else { return storage_; } } /** * required .hadoop.hdfs.StorageInfoProto storage = 2; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder> getStorageFieldBuilder() { if (storageBuilder_ == null) { storageBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder>( storage_, getParentForChildren(), 
isClean()); storage_ = null; } return storageBuilder_; } // required .hadoop.hdfs.StorageInfoProto prevStorage = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto prevStorage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder> prevStorageBuilder_; /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ public boolean hasPrevStorage() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getPrevStorage() { if (prevStorageBuilder_ == null) { return prevStorage_; } else { return prevStorageBuilder_.getMessage(); } } /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ public Builder setPrevStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) { if (prevStorageBuilder_ == null) { if (value == null) { throw new NullPointerException(); } prevStorage_ = value; onChanged(); } else { prevStorageBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ public Builder setPrevStorage( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder builderForValue) { if (prevStorageBuilder_ == null) { prevStorage_ = builderForValue.build(); onChanged(); } else { prevStorageBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ public Builder mergePrevStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) { if (prevStorageBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004) && prevStorage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance()) { prevStorage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.newBuilder(prevStorage_).mergeFrom(value).buildPartial(); } else { prevStorage_ = value; } onChanged(); } else { prevStorageBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ public Builder clearPrevStorage() { if (prevStorageBuilder_ == null) { prevStorage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); onChanged(); } else { prevStorageBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder getPrevStorageBuilder() { bitField0_ |= 0x00000004; onChanged(); return getPrevStorageFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getPrevStorageOrBuilder() { if (prevStorageBuilder_ != null) { return prevStorageBuilder_.getMessageOrBuilder(); } else { return prevStorage_; } } /** * required .hadoop.hdfs.StorageInfoProto prevStorage = 3; */ private com.google.protobuf.SingleFieldBuilder< 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder> getPrevStorageFieldBuilder() { if (prevStorageBuilder_ == null) { prevStorageBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder>( prevStorage_, getParentForChildren(), isClean()); prevStorage_ = null; } return prevStorageBuilder_; } // required int32 targetLayoutVersion = 4; private int targetLayoutVersion_ ; /** * required int32 targetLayoutVersion = 4; */ public boolean hasTargetLayoutVersion() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required int32 targetLayoutVersion = 4; */ public int getTargetLayoutVersion() { return targetLayoutVersion_; } /** * required int32 targetLayoutVersion = 4; */ public Builder setTargetLayoutVersion(int value) { bitField0_ |= 0x00000008; targetLayoutVersion_ = value; onChanged(); return this; } /** * required int32 targetLayoutVersion = 4; */ public Builder clearTargetLayoutVersion() { bitField0_ = (bitField0_ & ~0x00000008); targetLayoutVersion_ = 0; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.CanRollBackRequestProto) } static { defaultInstance = new CanRollBackRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.CanRollBackRequestProto) } public interface CanRollBackResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required bool canRollBack = 1; /** * required bool canRollBack = 1; */ boolean hasCanRollBack(); /** * required bool canRollBack = 1; */ boolean getCanRollBack(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.CanRollBackResponseProto} */ public static final class CanRollBackResponseProto extends com.google.protobuf.GeneratedMessage implements CanRollBackResponseProtoOrBuilder { // Use CanRollBackResponseProto.newBuilder() to construct. 
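    // Editor's note (not part of the generated source): callers usually just
    // read the single required flag after parsing, e.g.
    //   boolean ok = CanRollBackResponseProto.parseFrom(bytes).getCanRollBack();
    // where "bytes" stands for an illustrative serialized response.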
private CanRollBackResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CanRollBackResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CanRollBackResponseProto defaultInstance; public static CanRollBackResponseProto getDefaultInstance() { return defaultInstance; } public CanRollBackResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CanRollBackResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; canRollBack_ = input.readBool(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public CanRollBackResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new CanRollBackResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required bool canRollBack = 1; public static final int CANROLLBACK_FIELD_NUMBER = 1; private boolean canRollBack_; /** * required bool canRollBack = 1; */ public boolean hasCanRollBack() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool canRollBack = 1; */ public boolean getCanRollBack() { return canRollBack_; } private void initFields() { canRollBack_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasCanRollBack()) { memoizedIsInitialized = 0; return false; } 
memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, canRollBack_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(1, canRollBack_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto) obj; boolean result = true; result = result && (hasCanRollBack() == other.hasCanRollBack()); if (hasCanRollBack()) { result = result && (getCanRollBack() == other.getCanRollBack()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasCanRollBack()) { hash = (37 * hash) + CANROLLBACK_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getCanRollBack()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public 
static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.CanRollBackResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); canRollBack_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_descriptor; } public 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.canRollBack_ = canRollBack_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance()) return this; if (other.hasCanRollBack()) { setCanRollBack(other.getCanRollBack()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasCanRollBack()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool canRollBack = 1; private boolean canRollBack_ ; /** * required bool canRollBack = 1; */ public boolean hasCanRollBack() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool canRollBack = 1; */ public boolean getCanRollBack() { return canRollBack_; } /** * required bool canRollBack = 1; */ public Builder setCanRollBack(boolean value) { bitField0_ |= 0x00000001; canRollBack_ = value; onChanged(); return this; } /** * required bool canRollBack = 1; */ public Builder clearCanRollBack() { bitField0_ = (bitField0_ & ~0x00000001); canRollBack_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.CanRollBackResponseProto) } static { defaultInstance = new CanRollBackResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.CanRollBackResponseProto) } public interface DoRollbackRequestProtoOrBuilder extends 
com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ boolean hasJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoRollbackRequestProto} * *
   * <pre>
   **
   * doRollback()
   * </pre>
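   *
   * Editor's note (not part of the generated source): the request carries
   * only the required journal id. A minimal sketch, assuming the usual
   * generated setJid(...) builder overload; "journal-1" is illustrative.
   * <pre>
   * DoRollbackRequestProto req = DoRollbackRequestProto.newBuilder()
   *     .setJid(JournalIdProto.newBuilder().setIdentifier("journal-1"))
   *     .build();
   * </pre>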
*/ public static final class DoRollbackRequestProto extends com.google.protobuf.GeneratedMessage implements DoRollbackRequestProtoOrBuilder { // Use DoRollbackRequestProto.newBuilder() to construct. private DoRollbackRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DoRollbackRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DoRollbackRequestProto defaultInstance; public static DoRollbackRequestProto getDefaultInstance() { return defaultInstance; } public DoRollbackRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DoRollbackRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = jid_.toBuilder(); } jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(jid_); jid_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DoRollbackRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DoRollbackRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; public static final int JID_FIELD_NUMBER 
= 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { return jid_; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { return jid_; } private void initFields() { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasJid()) { memoizedIsInitialized = 0; return false; } if (!getJid().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, jid_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, jid_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto) obj; boolean result = true; result = result && (hasJid() == other.hasJid()); if (hasJid()) { result = result && getJid() .equals(other.getJid()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasJid()) { hash = (37 * hash) + JID_FIELD_NUMBER; hash = (53 * hash) + getJid().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoRollbackRequestProto} * *
     * <pre>
     **
     * doRollback()
     * </pre>
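     *
     * Illustrative sketch only (not part of the generated source), assuming a
     * hypothetical journal named "myjournal":
     * <pre>
     * DoRollbackRequestProto req = DoRollbackRequestProto.newBuilder()
     *     .setJid(JournalIdProto.newBuilder().setIdentifier("myjournal"))
     *     .build();
     * </pre>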
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getJidFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (jidBuilder_ == null) { result.jid_ = jid_; } else { result.jid_ = jidBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.getDefaultInstance()) return this; if (other.hasJid()) { mergeJid(other.getJid()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasJid()) { return false; } if (!getJid().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { if (jidBuilder_ == null) { return jid_; } else { return jidBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder clearJid() { 
if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { if (jidBuilder_ != null) { return jidBuilder_.getMessageOrBuilder(); } else { return jid_; } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> getJidFieldBuilder() { if (jidBuilder_ == null) { jidBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>( jid_, getParentForChildren(), isClean()); jid_ = null; } return jidBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoRollbackRequestProto) } static { defaultInstance = new DoRollbackRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoRollbackRequestProto) } public interface DoRollbackResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoRollbackResponseProto} */ public static final class DoRollbackResponseProto extends com.google.protobuf.GeneratedMessage implements DoRollbackResponseProtoOrBuilder { // Use DoRollbackResponseProto.newBuilder() to construct. 
private DoRollbackResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DoRollbackResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DoRollbackResponseProto defaultInstance; public static DoRollbackResponseProto getDefaultInstance() { return defaultInstance; } public DoRollbackResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DoRollbackResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DoRollbackResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DoRollbackResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { 
return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } 
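    // Illustrative sketch only (not generated code): DoRollbackResponseProto has no
    // fields, so a round trip amounts to serializing and re-parsing an empty message.
    //   DoRollbackResponseProto resp = DoRollbackResponseProto.newBuilder().build();
    //   DoRollbackResponseProto again = DoRollbackResponseProto.parseFrom(resp.toByteArray());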
public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.DoRollbackResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoRollbackResponseProto) } static { defaultInstance = new DoRollbackResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoRollbackResponseProto) } public interface GetJournalStateRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ boolean hasJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalStateRequestProto} * *
   * <pre>
   **
   * getJournalState()
   * </pre>
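   *
   * Illustrative sketch only (not part of the generated source); the journal
   * name "myjournal" is hypothetical:
   * <pre>
   * GetJournalStateRequestProto req = GetJournalStateRequestProto.newBuilder()
   *     .setJid(JournalIdProto.newBuilder().setIdentifier("myjournal"))
   *     .build();
   * </pre>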
*/ public static final class GetJournalStateRequestProto extends com.google.protobuf.GeneratedMessage implements GetJournalStateRequestProtoOrBuilder { // Use GetJournalStateRequestProto.newBuilder() to construct. private GetJournalStateRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetJournalStateRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetJournalStateRequestProto defaultInstance; public static GetJournalStateRequestProto getDefaultInstance() { return defaultInstance; } public GetJournalStateRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetJournalStateRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = jid_.toBuilder(); } jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(jid_); jid_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public GetJournalStateRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new GetJournalStateRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required 
.hadoop.hdfs.qjournal.JournalIdProto jid = 1; public static final int JID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { return jid_; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { return jid_; } private void initFields() { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasJid()) { memoizedIsInitialized = 0; return false; } if (!getJid().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, jid_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, jid_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) obj; boolean result = true; result = result && (hasJid() == other.hasJid()); if (hasJid()) { result = result && getJid() .equals(other.getJid()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasJid()) { hash = (37 * hash) + JID_FIELD_NUMBER; hash = (53 * hash) + getJid().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalStateRequestProto} * *
     * <pre>
     **
     * getJournalState()
     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getJidFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (jidBuilder_ == null) { result.jid_ = jid_; } else { result.jid_ = jidBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)other); } else { super.mergeFrom(other); return this; } } public 
Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance()) return this; if (other.hasJid()) { mergeJid(other.getJid()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasJid()) { return false; } if (!getJid().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { if (jidBuilder_ == null) { return jid_; } else { return jidBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ 
public Builder clearJid() { if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { if (jidBuilder_ != null) { return jidBuilder_.getMessageOrBuilder(); } else { return jid_; } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> getJidFieldBuilder() { if (jidBuilder_ == null) { jidBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>( jid_, getParentForChildren(), isClean()); jid_ = null; } return jidBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetJournalStateRequestProto) } static { defaultInstance = new GetJournalStateRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetJournalStateRequestProto) } public interface GetJournalStateResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint64 lastPromisedEpoch = 1; /** * required uint64 lastPromisedEpoch = 1; */ boolean hasLastPromisedEpoch(); /** * required uint64 lastPromisedEpoch = 1; */ long getLastPromisedEpoch(); // required uint32 httpPort = 2; /** * required uint32 httpPort = 2; * *
     * <pre>
     * Deprecated by fromURL
     * </pre>
*/ boolean hasHttpPort(); /** * required uint32 httpPort = 2; * *
     * <pre>
     * Deprecated by fromURL
     * </pre>
*/ int getHttpPort(); // optional string fromURL = 3; /** * optional string fromURL = 3; */ boolean hasFromURL(); /** * optional string fromURL = 3; */ java.lang.String getFromURL(); /** * optional string fromURL = 3; */ com.google.protobuf.ByteString getFromURLBytes(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalStateResponseProto} */ public static final class GetJournalStateResponseProto extends com.google.protobuf.GeneratedMessage implements GetJournalStateResponseProtoOrBuilder { // Use GetJournalStateResponseProto.newBuilder() to construct. private GetJournalStateResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetJournalStateResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetJournalStateResponseProto defaultInstance; public static GetJournalStateResponseProto getDefaultInstance() { return defaultInstance; } public GetJournalStateResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetJournalStateResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; lastPromisedEpoch_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; httpPort_ = input.readUInt32(); break; } case 26: { bitField0_ |= 0x00000004; fromURL_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public GetJournalStateResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new GetJournalStateResponseProto(input, 
extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint64 lastPromisedEpoch = 1; public static final int LASTPROMISEDEPOCH_FIELD_NUMBER = 1; private long lastPromisedEpoch_; /** * required uint64 lastPromisedEpoch = 1; */ public boolean hasLastPromisedEpoch() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 lastPromisedEpoch = 1; */ public long getLastPromisedEpoch() { return lastPromisedEpoch_; } // required uint32 httpPort = 2; public static final int HTTPPORT_FIELD_NUMBER = 2; private int httpPort_; /** * required uint32 httpPort = 2; * *
     * <pre>
     * Deprecated by fromURL
     * </pre>
*/ public boolean hasHttpPort() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint32 httpPort = 2; * *
     * <pre>
     * Deprecated by fromURL
     * </pre>
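     *
     * Illustrative sketch only (not generated code): callers reading a response
     * would typically prefer fromURL and fall back to httpPort, e.g.
     * <pre>
     * String url = resp.hasFromURL()
     *     ? resp.getFromURL()
     *     : "http://" + host + ":" + resp.getHttpPort();  // host is hypothetical
     * </pre>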
*/ public int getHttpPort() { return httpPort_; } // optional string fromURL = 3; public static final int FROMURL_FIELD_NUMBER = 3; private java.lang.Object fromURL_; /** * optional string fromURL = 3; */ public boolean hasFromURL() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional string fromURL = 3; */ public java.lang.String getFromURL() { java.lang.Object ref = fromURL_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { fromURL_ = s; } return s; } } /** * optional string fromURL = 3; */ public com.google.protobuf.ByteString getFromURLBytes() { java.lang.Object ref = fromURL_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromURL_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private void initFields() { lastPromisedEpoch_ = 0L; httpPort_ = 0; fromURL_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasLastPromisedEpoch()) { memoizedIsInitialized = 0; return false; } if (!hasHttpPort()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, lastPromisedEpoch_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt32(2, httpPort_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getFromURLBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, lastPromisedEpoch_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(2, httpPort_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, getFromURLBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) obj; boolean result = true; result = result && (hasLastPromisedEpoch() == other.hasLastPromisedEpoch()); if (hasLastPromisedEpoch()) { result = result && (getLastPromisedEpoch() == other.getLastPromisedEpoch()); } result = result && (hasHttpPort() == other.hasHttpPort()); if (hasHttpPort()) { result = result && (getHttpPort() == other.getHttpPort()); } result = result && (hasFromURL() == 
other.hasFromURL()); if (hasFromURL()) { result = result && getFromURL() .equals(other.getFromURL()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasLastPromisedEpoch()) { hash = (37 * hash) + LASTPROMISEDEPOCH_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLastPromisedEpoch()); } if (hasHttpPort()) { hash = (37 * hash) + HTTPPORT_FIELD_NUMBER; hash = (53 * hash) + getHttpPort(); } if (hasFromURL()) { hash = (37 * hash) + FROMURL_FIELD_NUMBER; hash = (53 * hash) + getFromURL().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return 
Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalStateResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); lastPromisedEpoch_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); httpPort_ = 0; bitField0_ = (bitField0_ & ~0x00000002); fromURL_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { 
to_bitField0_ |= 0x00000001; } result.lastPromisedEpoch_ = lastPromisedEpoch_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.httpPort_ = httpPort_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.fromURL_ = fromURL_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()) return this; if (other.hasLastPromisedEpoch()) { setLastPromisedEpoch(other.getLastPromisedEpoch()); } if (other.hasHttpPort()) { setHttpPort(other.getHttpPort()); } if (other.hasFromURL()) { bitField0_ |= 0x00000004; fromURL_ = other.fromURL_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasLastPromisedEpoch()) { return false; } if (!hasHttpPort()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 lastPromisedEpoch = 1; private long lastPromisedEpoch_ ; /** * required uint64 lastPromisedEpoch = 1; */ public boolean hasLastPromisedEpoch() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 lastPromisedEpoch = 1; */ public long getLastPromisedEpoch() { return lastPromisedEpoch_; } /** * required uint64 lastPromisedEpoch = 1; */ public Builder setLastPromisedEpoch(long value) { bitField0_ |= 0x00000001; lastPromisedEpoch_ = value; onChanged(); return this; } /** * required uint64 lastPromisedEpoch = 1; */ public Builder clearLastPromisedEpoch() { bitField0_ = (bitField0_ & ~0x00000001); lastPromisedEpoch_ = 0L; onChanged(); return this; } // required uint32 httpPort = 2; private int httpPort_ ; /** * required uint32 httpPort = 2; * *
       * <pre>
       * Deprecated by fromURL
       * </pre>
*/ public boolean hasHttpPort() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint32 httpPort = 2; * *
       * <pre>
       * Deprecated by fromURL
       * </pre>
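       * <p>Editorial note, not generated text: callers should prefer
       * {@code fromURL} when it is set and fall back to this deprecated
       * port only for older peers. A minimal sketch; the fallback policy
       * is an assumption about caller behavior, not something this class
       * enforces:
       * <pre>
       * GetJournalStateResponseProto resp = ...;
       * int port = resp.hasFromURL()
       *     ? java.net.URI.create(resp.getFromURL()).getPort()
       *     : resp.getHttpPort();  // legacy fallback
       * </pre>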
*/ public int getHttpPort() { return httpPort_; } /** * required uint32 httpPort = 2; * *
       * <pre>
       * Deprecated by fromURL
       * </pre>
*/ public Builder setHttpPort(int value) { bitField0_ |= 0x00000002; httpPort_ = value; onChanged(); return this; } /** * required uint32 httpPort = 2; * *
       * <pre>
       * Deprecated by fromURL
       * </pre>
*/ public Builder clearHttpPort() { bitField0_ = (bitField0_ & ~0x00000002); httpPort_ = 0; onChanged(); return this; } // optional string fromURL = 3; private java.lang.Object fromURL_ = ""; /** * optional string fromURL = 3; */ public boolean hasFromURL() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional string fromURL = 3; */ public java.lang.String getFromURL() { java.lang.Object ref = fromURL_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); fromURL_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string fromURL = 3; */ public com.google.protobuf.ByteString getFromURLBytes() { java.lang.Object ref = fromURL_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromURL_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * optional string fromURL = 3; */ public Builder setFromURL( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; fromURL_ = value; onChanged(); return this; } /** * optional string fromURL = 3; */ public Builder clearFromURL() { bitField0_ = (bitField0_ & ~0x00000004); fromURL_ = getDefaultInstance().getFromURL(); onChanged(); return this; } /** * optional string fromURL = 3; */ public Builder setFromURLBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; fromURL_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetJournalStateResponseProto) } static { defaultInstance = new GetJournalStateResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetJournalStateResponseProto) } public interface FormatRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ boolean hasJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder(); // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ boolean hasNsInfo(); /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto getNsInfo(); /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.FormatRequestProto} * *
   * <pre>
   **
   * format()
   * </pre>
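   * <p>Editorial usage sketch, not part of the generated file: populating
   * both required fields through the builder. The journal identifier is a
   * hypothetical value, and {@code nsInfo} stands for a
   * {@code NamespaceInfoProto} built elsewhere.
   * <pre>
   * FormatRequestProto req = FormatRequestProto.newBuilder()
   *     .setJid(JournalIdProto.newBuilder()
   *         .setIdentifier("myjournal")  // hypothetical journal id
   *         .build())
   *     .setNsInfo(nsInfo)
   *     .build();  // build() throws if a required field is missing
   * byte[] wire = req.toByteArray();
   * </pre>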
*/ public static final class FormatRequestProto extends com.google.protobuf.GeneratedMessage implements FormatRequestProtoOrBuilder { // Use FormatRequestProto.newBuilder() to construct. private FormatRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private FormatRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final FormatRequestProto defaultInstance; public static FormatRequestProto getDefaultInstance() { return defaultInstance; } public FormatRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FormatRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = jid_.toBuilder(); } jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(jid_); jid_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = nsInfo_.toBuilder(); } nsInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(nsInfo_); nsInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public FormatRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new FormatRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; public static final int JID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { return jid_; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { return jid_; } // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; public static final int NSINFO_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto nsInfo_; /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public boolean hasNsInfo() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto getNsInfo() { return nsInfo_; } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() { return nsInfo_; } private void initFields() { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasJid()) { memoizedIsInitialized = 0; return false; } if (!hasNsInfo()) { memoizedIsInitialized = 0; return false; } if (!getJid().isInitialized()) { memoizedIsInitialized = 0; return false; } if (!getNsInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, nsInfo_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, nsInfo_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return 
true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) obj; boolean result = true; result = result && (hasJid() == other.hasJid()); if (hasJid()) { result = result && getJid() .equals(other.getJid()); } result = result && (hasNsInfo() == other.hasNsInfo()); if (hasNsInfo()) { result = result && getNsInfo() .equals(other.getNsInfo()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasJid()) { hash = (37 * hash) + JID_FIELD_NUMBER; hash = (53 * hash) + getJid().hashCode(); } if (hasNsInfo()) { hash = (37 * hash) + NSINFO_FIELD_NUMBER; hash = (53 * hash) + getNsInfo().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.FormatRequestProto} * *
     * <pre>
     **
     * format()
     * </pre>
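     * <p>Editorial note, not generated text: {@code mergeFrom} overlays set
     * fields onto this builder, and message-typed fields such as {@code jid}
     * and {@code nsInfo} merge recursively rather than being replaced
     * wholesale. A minimal sketch with {@code base} and {@code overlay} as
     * hypothetical messages:
     * <pre>
     * FormatRequestProto merged = FormatRequestProto.newBuilder(base)
     *     .mergeFrom(overlay)
     *     .build();
     * </pre>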
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getJidFieldBuilder(); getNsInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); if (nsInfoBuilder_ == null) { nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance(); } else { nsInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (jidBuilder_ == null) { result.jid_ = jid_; } else { result.jid_ = jidBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } if (nsInfoBuilder_ == null) { result.nsInfo_ = nsInfo_; } else { result.nsInfo_ = nsInfoBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message 
other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance()) return this; if (other.hasJid()) { mergeJid(other.getJid()); } if (other.hasNsInfo()) { mergeNsInfo(other.getNsInfo()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasJid()) { return false; } if (!hasNsInfo()) { return false; } if (!getJid().isInitialized()) { return false; } if (!getNsInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { if (jidBuilder_ == null) { return jid_; } else { return jidBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && jid_ != 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder clearJid() { if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { if (jidBuilder_ != null) { return jidBuilder_.getMessageOrBuilder(); } else { return jid_; } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> getJidFieldBuilder() { if (jidBuilder_ == null) { jidBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>( jid_, getParentForChildren(), isClean()); jid_ = null; } return jidBuilder_; } // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_; /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public boolean hasNsInfo() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto getNsInfo() { if (nsInfoBuilder_ == null) { return nsInfo_; } else { return nsInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto value) { if (nsInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } nsInfo_ = value; onChanged(); } else { nsInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public Builder setNsInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder builderForValue) { if 
(nsInfoBuilder_ == null) { nsInfo_ = builderForValue.build(); onChanged(); } else { nsInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto value) { if (nsInfoBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance()) { nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial(); } else { nsInfo_ = value; } onChanged(); } else { nsInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public Builder clearNsInfo() { if (nsInfoBuilder_ == null) { nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance(); onChanged(); } else { nsInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder getNsInfoBuilder() { bitField0_ |= 0x00000002; onChanged(); return getNsInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() { if (nsInfoBuilder_ != null) { return nsInfoBuilder_.getMessageOrBuilder(); } else { return nsInfo_; } } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder> getNsInfoFieldBuilder() { if (nsInfoBuilder_ == null) { nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder>( nsInfo_, getParentForChildren(), isClean()); nsInfo_ = null; } return nsInfoBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.FormatRequestProto) } static { defaultInstance = new FormatRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.FormatRequestProto) } public interface FormatResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.qjournal.FormatResponseProto} */ public static final class FormatResponseProto extends com.google.protobuf.GeneratedMessage implements FormatResponseProtoOrBuilder { // Use FormatResponseProto.newBuilder() to construct. 
private FormatResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private FormatResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final FormatResponseProto defaultInstance; public static FormatResponseProto getDefaultInstance() { return defaultInstance; } public FormatResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FormatResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public FormatResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new FormatResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } 
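    /* Editorial note, not generated code: FormatResponseProto declares no
     * fields and acts purely as an acknowledgement of a format() call. A
     * plausible server-side sketch returns the shared default instance
     * instead of allocating a new message:
     *
     *   return FormatResponseProto.getDefaultInstance();
     */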
@java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder 
newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.FormatResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other) { if (other == 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.FormatResponseProto) } static { defaultInstance = new FormatResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.FormatResponseProto) } public interface NewEpochRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ boolean hasJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder(); // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ boolean hasNsInfo(); /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto getNsInfo(); /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder(); // required uint64 epoch = 3; /** * required uint64 epoch = 3; */ boolean hasEpoch(); /** * required uint64 epoch = 3; */ long getEpoch(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.NewEpochRequestProto} * *
   * <pre>
   **
   * newEpoch()
   * </pre>
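   * <p>Editorial usage sketch, not part of the generated file: all three
   * fields are required, so {@code build()} fails with an
   * {@code UninitializedMessageException} when any of them is unset. The
   * epoch value is hypothetical; {@code jid} and {@code nsInfo} stand for
   * messages built elsewhere.
   * <pre>
   * NewEpochRequestProto req = NewEpochRequestProto.newBuilder()
   *     .setJid(jid)
   *     .setNsInfo(nsInfo)
   *     .setEpoch(5L)  // hypothetical epoch number
   *     .build();
   * </pre>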
*/ public static final class NewEpochRequestProto extends com.google.protobuf.GeneratedMessage implements NewEpochRequestProtoOrBuilder { // Use NewEpochRequestProto.newBuilder() to construct. private NewEpochRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private NewEpochRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final NewEpochRequestProto defaultInstance; public static NewEpochRequestProto getDefaultInstance() { return defaultInstance; } public NewEpochRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private NewEpochRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = jid_.toBuilder(); } jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(jid_); jid_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = nsInfo_.toBuilder(); } nsInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(nsInfo_); nsInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } case 24: { bitField0_ |= 0x00000004; epoch_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new 
com.google.protobuf.AbstractParser() { public NewEpochRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new NewEpochRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; public static final int JID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { return jid_; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { return jid_; } // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; public static final int NSINFO_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto nsInfo_; /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public boolean hasNsInfo() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto getNsInfo() { return nsInfo_; } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() { return nsInfo_; } // required uint64 epoch = 3; public static final int EPOCH_FIELD_NUMBER = 3; private long epoch_; /** * required uint64 epoch = 3; */ public boolean hasEpoch() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 epoch = 3; */ public long getEpoch() { return epoch_; } private void initFields() { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance(); epoch_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasJid()) { memoizedIsInitialized = 0; return false; } if (!hasNsInfo()) { memoizedIsInitialized = 0; return false; } if (!hasEpoch()) { memoizedIsInitialized = 0; return false; } if (!getJid().isInitialized()) { memoizedIsInitialized = 0; return false; } if (!getNsInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, nsInfo_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, epoch_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { 
size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, nsInfo_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, epoch_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) obj; boolean result = true; result = result && (hasJid() == other.hasJid()); if (hasJid()) { result = result && getJid() .equals(other.getJid()); } result = result && (hasNsInfo() == other.hasNsInfo()); if (hasNsInfo()) { result = result && getNsInfo() .equals(other.getNsInfo()); } result = result && (hasEpoch() == other.hasEpoch()); if (hasEpoch()) { result = result && (getEpoch() == other.getEpoch()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasJid()) { hash = (37 * hash) + JID_FIELD_NUMBER; hash = (53 * hash) + getJid().hashCode(); } if (hasNsInfo()) { hash = (37 * hash) + NSINFO_FIELD_NUMBER; hash = (53 * hash) + getNsInfo().hashCode(); } if (hasEpoch()) { hash = (37 * hash) + EPOCH_FIELD_NUMBER; hash = (53 * hash) + hashLong(getEpoch()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom( java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.NewEpochRequestProto} * *
<pre>
     **
     * newEpoch()
     * </pre>
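     * <p>Added usage sketch, not part of the generated API: building a
     * request with this Builder. The journal identifier, the {@code nsInfo}
     * variable, and the epoch value are illustrative assumptions.
     * <pre>
     * NewEpochRequestProto req = NewEpochRequestProto.newBuilder()
     *     .setJid(JournalIdProto.newBuilder().setIdentifier("edits-journal"))
     *     .setNsInfo(nsInfo)  // a fully populated NamespaceInfoProto
     *     .setEpoch(42L)
     *     .build();           // jid, nsInfo and epoch are all required,
     *                         // so build() throws if any is left unset
     * </pre>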
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getJidFieldBuilder(); getNsInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); if (nsInfoBuilder_ == null) { nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance(); } else { nsInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); epoch_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (jidBuilder_ == null) { result.jid_ = jid_; } else { result.jid_ = jidBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } if (nsInfoBuilder_ == null) { result.nsInfo_ = nsInfo_; } else { result.nsInfo_ = nsInfoBuilder_.build(); } if (((from_bitField0_ & 0x00000004) 
== 0x00000004)) { to_bitField0_ |= 0x00000004; } result.epoch_ = epoch_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance()) return this; if (other.hasJid()) { mergeJid(other.getJid()); } if (other.hasNsInfo()) { mergeNsInfo(other.getNsInfo()); } if (other.hasEpoch()) { setEpoch(other.getEpoch()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasJid()) { return false; } if (!hasNsInfo()) { return false; } if (!hasEpoch()) { return false; } if (!getJid().isInitialized()) { return false; } if (!getNsInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { if (jidBuilder_ == null) { return jid_; } else { return jidBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } 
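    // Added note, not generated by protoc: setJid(...) above replaces any
    // current value outright, while mergeJid(...) below folds the argument
    // into an existing value with standard protobuf merge semantics (fields
    // set in the argument win, fields unset in the argument are kept).
    // Hypothetical illustration:
    //
    //   builder.setJid(JournalIdProto.newBuilder().setIdentifier("a"));
    //   builder.mergeJid(otherJid); // identifier stays "a" unless
    //                               // otherJid sets its own identifier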
/** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder clearJid() { if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { if (jidBuilder_ != null) { return jidBuilder_.getMessageOrBuilder(); } else { return jid_; } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> getJidFieldBuilder() { if (jidBuilder_ == null) { jidBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>( jid_, getParentForChildren(), isClean()); jid_ = null; } return jidBuilder_; } // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_; /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public boolean hasNsInfo() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto getNsInfo() { if (nsInfoBuilder_ == null) { return nsInfo_; } else { return nsInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto value) { if (nsInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } nsInfo_ = value; onChanged(); } else { 
nsInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public Builder setNsInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder builderForValue) { if (nsInfoBuilder_ == null) { nsInfo_ = builderForValue.build(); onChanged(); } else { nsInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto value) { if (nsInfoBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance()) { nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial(); } else { nsInfo_ = value; } onChanged(); } else { nsInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public Builder clearNsInfo() { if (nsInfoBuilder_ == null) { nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance(); onChanged(); } else { nsInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder getNsInfoBuilder() { bitField0_ |= 0x00000002; onChanged(); return getNsInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() { if (nsInfoBuilder_ != null) { return nsInfoBuilder_.getMessageOrBuilder(); } else { return nsInfo_; } } /** * required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder> getNsInfoFieldBuilder() { if (nsInfoBuilder_ == null) { nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder>( nsInfo_, getParentForChildren(), isClean()); nsInfo_ = null; } return nsInfoBuilder_; } // required uint64 epoch = 3; private long epoch_ ; /** * required uint64 epoch = 3; */ public boolean hasEpoch() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 epoch = 3; */ public long getEpoch() { return epoch_; } /** * required uint64 epoch = 3; */ public Builder setEpoch(long value) { bitField0_ |= 0x00000004; epoch_ = value; onChanged(); return this; } /** * required uint64 epoch = 3; */ public Builder clearEpoch() { bitField0_ = (bitField0_ & ~0x00000004); epoch_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.NewEpochRequestProto) } static { defaultInstance = new NewEpochRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.NewEpochRequestProto) } public 
interface NewEpochResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // optional uint64 lastSegmentTxId = 1; /** * optional uint64 lastSegmentTxId = 1; */ boolean hasLastSegmentTxId(); /** * optional uint64 lastSegmentTxId = 1; */ long getLastSegmentTxId(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.NewEpochResponseProto} */ public static final class NewEpochResponseProto extends com.google.protobuf.GeneratedMessage implements NewEpochResponseProtoOrBuilder { // Use NewEpochResponseProto.newBuilder() to construct. private NewEpochResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private NewEpochResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final NewEpochResponseProto defaultInstance; public static NewEpochResponseProto getDefaultInstance() { return defaultInstance; } public NewEpochResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private NewEpochResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; lastSegmentTxId_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public NewEpochResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new NewEpochResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional uint64 lastSegmentTxId = 1; public static final int LASTSEGMENTTXID_FIELD_NUMBER = 1; private long lastSegmentTxId_; /** * 
optional uint64 lastSegmentTxId = 1; */ public boolean hasLastSegmentTxId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional uint64 lastSegmentTxId = 1; */ public long getLastSegmentTxId() { return lastSegmentTxId_; } private void initFields() { lastSegmentTxId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, lastSegmentTxId_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, lastSegmentTxId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) obj; boolean result = true; result = result && (hasLastSegmentTxId() == other.hasLastSegmentTxId()); if (hasLastSegmentTxId()) { result = result && (getLastSegmentTxId() == other.getLastSegmentTxId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasLastSegmentTxId()) { hash = (37 * hash) + LASTSEGMENTTXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLastSegmentTxId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public 
static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.NewEpochResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); lastSegmentTxId_ = 
0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.lastSegmentTxId_ = lastSegmentTxId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()) return this; if (other.hasLastSegmentTxId()) { setLastSegmentTxId(other.getLastSegmentTxId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional uint64 lastSegmentTxId = 1; private long lastSegmentTxId_ ; /** * optional uint64 lastSegmentTxId = 1; */ public boolean hasLastSegmentTxId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional uint64 lastSegmentTxId = 1; */ public long getLastSegmentTxId() { return lastSegmentTxId_; } /** * optional uint64 lastSegmentTxId = 1; */ public Builder setLastSegmentTxId(long value) { bitField0_ |= 0x00000001; lastSegmentTxId_ = value; onChanged(); return this; } /** * optional uint64 lastSegmentTxId = 1; */ public Builder clearLastSegmentTxId() { bitField0_ = (bitField0_ & ~0x00000001); lastSegmentTxId_ = 0L; onChanged(); 
return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.NewEpochResponseProto) } static { defaultInstance = new NewEpochResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.NewEpochResponseProto) } public interface GetEditLogManifestRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ boolean hasJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder(); // required uint64 sinceTxId = 2; /** * required uint64 sinceTxId = 2; * *
<pre>
     * Transaction ID
     * </pre>
*/ boolean hasSinceTxId(); /** * required uint64 sinceTxId = 2; * *
<pre>
     * Transaction ID
     * </pre>
*/ long getSinceTxId(); // optional bool inProgressOk = 4 [default = false]; /** * optional bool inProgressOk = 4 [default = false]; * *
<pre>
     * Whether or not the client will be reading from the returned streams.
     * optional bool forReading = 3 [default = true]; <obsolete, do not reuse>
     * </pre>
*/ boolean hasInProgressOk(); /** * optional bool inProgressOk = 4 [default = false]; * *
<pre>
     * Whether or not the client will be reading from the returned streams.
     * optional bool forReading = 3 [default = true]; <obsolete, do not reuse>
     * </pre>
*/ boolean getInProgressOk(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetEditLogManifestRequestProto} * *
<pre>
   **
   * getEditLogManifest()
   * </pre>
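   * <p>Added sketch with assumed values: a typical request for all edit
   * log segments after transaction 1000, in-progress segments included.
   * <pre>
   * GetEditLogManifestRequestProto req =
   *     GetEditLogManifestRequestProto.newBuilder()
   *         .setJid(JournalIdProto.newBuilder().setIdentifier("edits-journal"))
   *         .setSinceTxId(1000L)    // required
   *         .setInProgressOk(true)  // optional, defaults to false
   *         .build();
   * </pre>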
*/ public static final class GetEditLogManifestRequestProto extends com.google.protobuf.GeneratedMessage implements GetEditLogManifestRequestProtoOrBuilder { // Use GetEditLogManifestRequestProto.newBuilder() to construct. private GetEditLogManifestRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetEditLogManifestRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetEditLogManifestRequestProto defaultInstance; public static GetEditLogManifestRequestProto getDefaultInstance() { return defaultInstance; } public GetEditLogManifestRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetEditLogManifestRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = jid_.toBuilder(); } jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(jid_); jid_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; sinceTxId_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000004; inProgressOk_ = input.readBool(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public GetEditLogManifestRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new 
GetEditLogManifestRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; public static final int JID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { return jid_; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { return jid_; } // required uint64 sinceTxId = 2; public static final int SINCETXID_FIELD_NUMBER = 2; private long sinceTxId_; /** * required uint64 sinceTxId = 2; * *
<pre>
     * Transaction ID
     * </pre>
*/ public boolean hasSinceTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 sinceTxId = 2; * *
<pre>
     * Transaction ID
     * </pre>
*/ public long getSinceTxId() { return sinceTxId_; } // optional bool inProgressOk = 4 [default = false]; public static final int INPROGRESSOK_FIELD_NUMBER = 4; private boolean inProgressOk_; /** * optional bool inProgressOk = 4 [default = false]; * *
<pre>
     * Whether or not the client will be reading from the returned streams.
     * optional bool forReading = 3 [default = true]; <obsolete, do not reuse>
     * </pre>
*/ public boolean hasInProgressOk() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bool inProgressOk = 4 [default = false]; * *
<pre>
     * Whether or not the client will be reading from the returned streams.
     * optional bool forReading = 3 [default = true]; <obsolete, do not reuse>
     * </pre>
*/ public boolean getInProgressOk() { return inProgressOk_; } private void initFields() { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); sinceTxId_ = 0L; inProgressOk_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasJid()) { memoizedIsInitialized = 0; return false; } if (!hasSinceTxId()) { memoizedIsInitialized = 0; return false; } if (!getJid().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, sinceTxId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBool(4, inProgressOk_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, sinceTxId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(4, inProgressOk_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) obj; boolean result = true; result = result && (hasJid() == other.hasJid()); if (hasJid()) { result = result && getJid() .equals(other.getJid()); } result = result && (hasSinceTxId() == other.hasSinceTxId()); if (hasSinceTxId()) { result = result && (getSinceTxId() == other.getSinceTxId()); } result = result && (hasInProgressOk() == other.hasInProgressOk()); if (hasInProgressOk()) { result = result && (getInProgressOk() == other.getInProgressOk()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasJid()) { hash = (37 * hash) + JID_FIELD_NUMBER; hash = (53 * hash) + getJid().hashCode(); } if (hasSinceTxId()) { hash = (37 * hash) + SINCETXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getSinceTxId()); } if (hasInProgressOk()) { hash = (37 * hash) + INPROGRESSOK_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getInProgressOk()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetEditLogManifestRequestProto} * *
<pre>
     **
     * getEditLogManifest()
     * </pre>
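     * <p>Added round-trip sketch; {@code req} is assumed to be a message
     * built as in the class-level example above.
     * <pre>
     * byte[] wire = req.toByteArray();
     * GetEditLogManifestRequestProto copy =
     *     GetEditLogManifestRequestProto.parseFrom(wire);
     * assert copy.equals(req); // equals() compares all set fields
     * </pre>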
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getJidFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); sinceTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); inProgressOk_ = false; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (jidBuilder_ == null) { result.jid_ = jid_; } else { result.jid_ = jidBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.sinceTxId_ = sinceTxId_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.inProgressOk_ = inProgressOk_; result.bitField0_ 
= to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance()) return this; if (other.hasJid()) { mergeJid(other.getJid()); } if (other.hasSinceTxId()) { setSinceTxId(other.getSinceTxId()); } if (other.hasInProgressOk()) { setInProgressOk(other.getInProgressOk()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasJid()) { return false; } if (!hasSinceTxId()) { return false; } if (!getJid().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { if (jidBuilder_ == null) { return jid_; } else { return jidBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder 
mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder clearJid() { if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { if (jidBuilder_ != null) { return jidBuilder_.getMessageOrBuilder(); } else { return jid_; } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> getJidFieldBuilder() { if (jidBuilder_ == null) { jidBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>( jid_, getParentForChildren(), isClean()); jid_ = null; } return jidBuilder_; } // required uint64 sinceTxId = 2; private long sinceTxId_ ; /** * required uint64 sinceTxId = 2; * *
<pre>
       * Transaction ID
       * </pre>
*/ public boolean hasSinceTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 sinceTxId = 2; * *
<pre>
       * Transaction ID
       * </pre>
*/ public long getSinceTxId() { return sinceTxId_; } /** * required uint64 sinceTxId = 2; * *
<pre>
       * Transaction ID
       * </pre>
*/ public Builder setSinceTxId(long value) { bitField0_ |= 0x00000002; sinceTxId_ = value; onChanged(); return this; } /** * required uint64 sinceTxId = 2; * *
<pre>
       * Transaction ID
       * </pre>
*/ public Builder clearSinceTxId() { bitField0_ = (bitField0_ & ~0x00000002); sinceTxId_ = 0L; onChanged(); return this; } // optional bool inProgressOk = 4 [default = false]; private boolean inProgressOk_ ; /** * optional bool inProgressOk = 4 [default = false]; * *
<pre>
       * Whether or not the client will be reading from the returned streams.
       * optional bool forReading = 3 [default = true]; <obsolete, do not reuse>
       * </pre>
*/ public boolean hasInProgressOk() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bool inProgressOk = 4 [default = false]; * *
<pre>
       * Whether or not the client will be reading from the returned streams.
       * optional bool forReading = 3 [default = true]; <obsolete, do not reuse>
       * </pre>
*/ public boolean getInProgressOk() { return inProgressOk_; } /** * optional bool inProgressOk = 4 [default = false]; * *
<pre>
       * Whether or not the client will be reading from the returned streams.
       * optional bool forReading = 3 [default = true]; <obsolete, do not reuse>
       * </pre>
*/ public Builder setInProgressOk(boolean value) { bitField0_ |= 0x00000004; inProgressOk_ = value; onChanged(); return this; } /** * optional bool inProgressOk = 4 [default = false]; * *
<pre>
       * Whether or not the client will be reading from the returned streams.
       * optional bool forReading = 3 [default = true]; <obsolete, do not reuse>
       * </pre>
*/ public Builder clearInProgressOk() { bitField0_ = (bitField0_ & ~0x00000004); inProgressOk_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetEditLogManifestRequestProto) } static { defaultInstance = new GetEditLogManifestRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetEditLogManifestRequestProto) } public interface GetEditLogManifestResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ boolean hasManifest(); /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto getManifest(); /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder(); // required uint32 httpPort = 2; /** * required uint32 httpPort = 2; * *
<pre>
     * Deprecated by fromURL
     * </pre>
*/ boolean hasHttpPort(); /** * required uint32 httpPort = 2; * *
<pre>
     * Deprecated by fromURL
     * </pre>
*/ int getHttpPort(); // optional string fromURL = 3; /** * optional string fromURL = 3; */ boolean hasFromURL(); /** * optional string fromURL = 3; */ java.lang.String getFromURL(); /** * optional string fromURL = 3; */ com.google.protobuf.ByteString getFromURLBytes(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetEditLogManifestResponseProto} */ public static final class GetEditLogManifestResponseProto extends com.google.protobuf.GeneratedMessage implements GetEditLogManifestResponseProtoOrBuilder { // Use GetEditLogManifestResponseProto.newBuilder() to construct. private GetEditLogManifestResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetEditLogManifestResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetEditLogManifestResponseProto defaultInstance; public static GetEditLogManifestResponseProto getDefaultInstance() { return defaultInstance; } public GetEditLogManifestResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetEditLogManifestResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = manifest_.toBuilder(); } manifest_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(manifest_); manifest_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; httpPort_ = input.readUInt32(); break; } case 26: { bitField0_ |= 0x00000004; fromURL_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class, 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public GetEditLogManifestResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new GetEditLogManifestResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; public static final int MANIFEST_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto manifest_; /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public boolean hasManifest() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto getManifest() { return manifest_; } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() { return manifest_; } // required uint32 httpPort = 2; public static final int HTTPPORT_FIELD_NUMBER = 2; private int httpPort_; /** * required uint32 httpPort = 2; * *
* Deprecated by fromURL
*/ public boolean hasHttpPort() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint32 httpPort = 2; * *
* Deprecated by fromURL
*/ public int getHttpPort() { return httpPort_; } // optional string fromURL = 3; public static final int FROMURL_FIELD_NUMBER = 3; private java.lang.Object fromURL_; /** * optional string fromURL = 3; */ public boolean hasFromURL() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional string fromURL = 3; */ public java.lang.String getFromURL() { java.lang.Object ref = fromURL_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { fromURL_ = s; } return s; } } /** * optional string fromURL = 3; */ public com.google.protobuf.ByteString getFromURLBytes() { java.lang.Object ref = fromURL_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromURL_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private void initFields() { manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.getDefaultInstance(); httpPort_ = 0; fromURL_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasManifest()) { memoizedIsInitialized = 0; return false; } if (!hasHttpPort()) { memoizedIsInitialized = 0; return false; } if (!getManifest().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, manifest_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt32(2, httpPort_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getFromURLBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, manifest_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(2, httpPort_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, getFromURLBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) obj; boolean result = true; result = result && (hasManifest() == other.hasManifest()); if (hasManifest()) { result = result && getManifest() .equals(other.getManifest()); } result = result && (hasHttpPort() == other.hasHttpPort()); if (hasHttpPort()) { 
result = result && (getHttpPort() == other.getHttpPort()); } result = result && (hasFromURL() == other.hasFromURL()); if (hasFromURL()) { result = result && getFromURL() .equals(other.getFromURL()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasManifest()) { hash = (37 * hash) + MANIFEST_FIELD_NUMBER; hash = (53 * hash) + getManifest().hashCode(); } if (hasHttpPort()) { hash = (37 * hash) + HTTPPORT_FIELD_NUMBER; hash = (53 * hash) + getHttpPort(); } if (hasFromURL()) { hash = (37 * hash) + FROMURL_FIELD_NUMBER; hash = (53 * hash) + getFromURL().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { 
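/* Editor's note -- usage sketch, not part of the generated code: all of the static
 * parseFrom overloads in this region delegate to PARSER. A caller holding the raw
 * bytes of a response (the byte[] name "wire" and the helper "fetchManifestFrom"
 * are assumed placeholders, not Hadoop APIs) could decode and dispatch like this:
 *
 *   GetEditLogManifestResponseProto resp =
 *       GetEditLogManifestResponseProto.parseFrom(wire);
 *   if (resp.hasFromURL()) {
 *     fetchManifestFrom(resp.getFromURL());                 // preferred path
 *   } else {
 *     fetchManifestFrom("http://jn.example.com:" + resp.getHttpPort());  // legacy port field
 *   }
 */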
return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetEditLogManifestResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getManifestFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (manifestBuilder_ == null) { manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.getDefaultInstance(); } else { manifestBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); httpPort_ = 0; bitField0_ = (bitField0_ & ~0x00000002); fromURL_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto buildPartial() { 
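/* Editor's note -- illustrative only: buildPartial() below copies the builder's
 * fields into a new message without checking required fields, while build()
 * (defined just above) additionally throws if manifest or httpPort is unset.
 * A hedged example of constructing a response; the field values are made up:
 *
 *   GetEditLogManifestResponseProto resp = GetEditLogManifestResponseProto.newBuilder()
 *       .setManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos
 *           .RemoteEditLogManifestProto.getDefaultInstance())
 *       .setHttpPort(8480)                          // still required, though deprecated by fromURL
 *       .setFromURL("http://jn.example.com:8480")   // assumed placeholder URL
 *       .build();
 */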
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (manifestBuilder_ == null) { result.manifest_ = manifest_; } else { result.manifest_ = manifestBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.httpPort_ = httpPort_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.fromURL_ = fromURL_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()) return this; if (other.hasManifest()) { mergeManifest(other.getManifest()); } if (other.hasHttpPort()) { setHttpPort(other.getHttpPort()); } if (other.hasFromURL()) { bitField0_ |= 0x00000004; fromURL_ = other.fromURL_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasManifest()) { return false; } if (!hasHttpPort()) { return false; } if (!getManifest().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProtoOrBuilder> manifestBuilder_; /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public boolean hasManifest() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto getManifest() { if (manifestBuilder_ == null) { return manifest_; } else { return 
manifestBuilder_.getMessage(); } } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public Builder setManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto value) { if (manifestBuilder_ == null) { if (value == null) { throw new NullPointerException(); } manifest_ = value; onChanged(); } else { manifestBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public Builder setManifest( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder builderForValue) { if (manifestBuilder_ == null) { manifest_ = builderForValue.build(); onChanged(); } else { manifestBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public Builder mergeManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto value) { if (manifestBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && manifest_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.getDefaultInstance()) { manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.newBuilder(manifest_).mergeFrom(value).buildPartial(); } else { manifest_ = value; } onChanged(); } else { manifestBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public Builder clearManifest() { if (manifestBuilder_ == null) { manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.getDefaultInstance(); onChanged(); } else { manifestBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder getManifestBuilder() { bitField0_ |= 0x00000001; onChanged(); return getManifestFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() { if (manifestBuilder_ != null) { return manifestBuilder_.getMessageOrBuilder(); } else { return manifest_; } } /** * required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProtoOrBuilder> getManifestFieldBuilder() { if (manifestBuilder_ == null) { manifestBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProtoOrBuilder>( manifest_, getParentForChildren(), isClean()); manifest_ = null; } return manifestBuilder_; } // required uint32 httpPort = 2; private int httpPort_ ; /** * required uint32 httpPort = 2; * *
* Deprecated by fromURL
*/ public boolean hasHttpPort() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint32 httpPort = 2; * *
* Deprecated by fromURL
*/ public int getHttpPort() { return httpPort_; } /** * required uint32 httpPort = 2; * *
* Deprecated by fromURL
*/ public Builder setHttpPort(int value) { bitField0_ |= 0x00000002; httpPort_ = value; onChanged(); return this; } /** * required uint32 httpPort = 2; * *
* Deprecated by fromURL
*/ public Builder clearHttpPort() { bitField0_ = (bitField0_ & ~0x00000002); httpPort_ = 0; onChanged(); return this; } // optional string fromURL = 3; private java.lang.Object fromURL_ = ""; /** * optional string fromURL = 3; */ public boolean hasFromURL() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional string fromURL = 3; */ public java.lang.String getFromURL() { java.lang.Object ref = fromURL_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); fromURL_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string fromURL = 3; */ public com.google.protobuf.ByteString getFromURLBytes() { java.lang.Object ref = fromURL_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromURL_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * optional string fromURL = 3; */ public Builder setFromURL( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; fromURL_ = value; onChanged(); return this; } /** * optional string fromURL = 3; */ public Builder clearFromURL() { bitField0_ = (bitField0_ & ~0x00000004); fromURL_ = getDefaultInstance().getFromURL(); onChanged(); return this; } /** * optional string fromURL = 3; */ public Builder setFromURLBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; fromURL_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetEditLogManifestResponseProto) } static { defaultInstance = new GetEditLogManifestResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetEditLogManifestResponseProto) } public interface GetJournaledEditsRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ boolean hasJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid(); /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder(); // required uint64 sinceTxId = 2; /** * required uint64 sinceTxId = 2; */ boolean hasSinceTxId(); /** * required uint64 sinceTxId = 2; */ long getSinceTxId(); // required uint32 maxTxns = 3; /** * required uint32 maxTxns = 3; */ boolean hasMaxTxns(); /** * required uint32 maxTxns = 3; */ int getMaxTxns(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetJournaledEditsRequestProto} * *
* getJournaledEdits()
*/ public static final class GetJournaledEditsRequestProto extends com.google.protobuf.GeneratedMessage implements GetJournaledEditsRequestProtoOrBuilder { // Use GetJournaledEditsRequestProto.newBuilder() to construct. private GetJournaledEditsRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetJournaledEditsRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetJournaledEditsRequestProto defaultInstance; public static GetJournaledEditsRequestProto getDefaultInstance() { return defaultInstance; } public GetJournaledEditsRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetJournaledEditsRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = jid_.toBuilder(); } jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(jid_); jid_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; sinceTxId_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; maxTxns_ = input.readUInt32(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournaledEditsRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournaledEditsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public GetJournaledEditsRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new 
GetJournaledEditsRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; public static final int JID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { return jid_; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { return jid_; } // required uint64 sinceTxId = 2; public static final int SINCETXID_FIELD_NUMBER = 2; private long sinceTxId_; /** * required uint64 sinceTxId = 2; */ public boolean hasSinceTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 sinceTxId = 2; */ public long getSinceTxId() { return sinceTxId_; } // required uint32 maxTxns = 3; public static final int MAXTXNS_FIELD_NUMBER = 3; private int maxTxns_; /** * required uint32 maxTxns = 3; */ public boolean hasMaxTxns() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint32 maxTxns = 3; */ public int getMaxTxns() { return maxTxns_; } private void initFields() { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); sinceTxId_ = 0L; maxTxns_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasJid()) { memoizedIsInitialized = 0; return false; } if (!hasSinceTxId()) { memoizedIsInitialized = 0; return false; } if (!hasMaxTxns()) { memoizedIsInitialized = 0; return false; } if (!getJid().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, sinceTxId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt32(3, maxTxns_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, jid_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, sinceTxId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(3, maxTxns_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto) obj; boolean result = true; result = result && (hasJid() == other.hasJid()); if (hasJid()) { result = result && getJid() .equals(other.getJid()); } result = result && (hasSinceTxId() == other.hasSinceTxId()); if (hasSinceTxId()) { result = result && (getSinceTxId() == other.getSinceTxId()); } result = result && (hasMaxTxns() == other.hasMaxTxns()); if (hasMaxTxns()) { result = result && (getMaxTxns() == other.getMaxTxns()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasJid()) { hash = (37 * hash) + JID_FIELD_NUMBER; hash = (53 * hash) + getJid().hashCode(); } if (hasSinceTxId()) { hash = (37 * hash) + SINCETXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getSinceTxId()); } if (hasMaxTxns()) { hash = (37 * hash) + MAXTXNS_FIELD_NUMBER; hash = (53 * hash) + getMaxTxns(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetJournaledEditsRequestProto} * *
* getJournaledEdits()
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournaledEditsRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournaledEditsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getJidFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); sinceTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); maxTxns_ = 0; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournaledEditsRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (jidBuilder_ == null) { result.jid_ = jid_; } else { result.jid_ = jidBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.sinceTxId_ = sinceTxId_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.maxTxns_ = maxTxns_; result.bitField0_ = to_bitField0_; onBuilt(); 
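/* Editor's note -- usage sketch, not part of the generated code: a request for
 * in-memory journaled edits is assembled through this Builder. The journal id,
 * transaction id, and cap below are assumed example values; note that setJid
 * also accepts a JournalIdProto.Builder overload, used here:
 *
 *   GetJournaledEditsRequestProto req = GetJournaledEditsRequestProto.newBuilder()
 *       .setJid(JournalIdProto.newBuilder().setIdentifier("mycluster"))
 *       .setSinceTxId(12345L)   // first txid the caller still needs
 *       .setMaxTxns(5000)       // upper bound on transactions returned
 *       .build();
 */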
return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto.getDefaultInstance()) return this; if (other.hasJid()) { mergeJid(other.getJid()); } if (other.hasSinceTxId()) { setSinceTxId(other.getSinceTxId()); } if (other.hasMaxTxns()) { setMaxTxns(other.getMaxTxns()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasJid()) { return false; } if (!hasSinceTxId()) { return false; } if (!hasMaxTxns()) { return false; } if (!getJid().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_; /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public boolean hasJid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() { if (jidBuilder_ == null) { return jid_; } else { return jidBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder 
mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public Builder clearJid() { if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() { if (jidBuilder_ != null) { return jidBuilder_.getMessageOrBuilder(); } else { return jid_; } } /** * required .hadoop.hdfs.qjournal.JournalIdProto jid = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> getJidFieldBuilder() { if (jidBuilder_ == null) { jidBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>( jid_, getParentForChildren(), isClean()); jid_ = null; } return jidBuilder_; } // required uint64 sinceTxId = 2; private long sinceTxId_ ; /** * required uint64 sinceTxId = 2; */ public boolean hasSinceTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 sinceTxId = 2; */ public long getSinceTxId() { return sinceTxId_; } /** * required uint64 sinceTxId = 2; */ public Builder setSinceTxId(long value) { bitField0_ |= 0x00000002; sinceTxId_ = value; onChanged(); return this; } /** * required uint64 sinceTxId = 2; */ public Builder clearSinceTxId() { bitField0_ = (bitField0_ & ~0x00000002); sinceTxId_ = 0L; onChanged(); return this; } // required uint32 maxTxns = 3; private int maxTxns_ ; /** * required uint32 maxTxns = 3; */ public boolean hasMaxTxns() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint32 maxTxns = 3; */ public int getMaxTxns() { return maxTxns_; } /** * required uint32 maxTxns = 3; */ public Builder setMaxTxns(int value) { bitField0_ |= 0x00000004; maxTxns_ = value; onChanged(); return this; } /** * required uint32 maxTxns = 3; */ public Builder clearMaxTxns() { bitField0_ = (bitField0_ & ~0x00000004); maxTxns_ = 0; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetJournaledEditsRequestProto) } static { defaultInstance = new GetJournaledEditsRequestProto(true); 
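/* Editor's note -- hedged sketch: besides the byte[]/ByteString overloads, the
 * parseDelimitedFrom methods above pair with writeDelimitedTo (implemented in
 * protobuf's AbstractMessageLite) for length-prefixed framing on a stream.
 * The stream names "socketOut"/"socketIn" are assumed placeholders:
 *
 *   req.writeDelimitedTo(socketOut);                                  // writer side
 *   GetJournaledEditsRequestProto echoed =
 *       GetJournaledEditsRequestProto.parseDelimitedFrom(socketIn);   // reader side
 */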
defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetJournaledEditsRequestProto) } public interface GetJournaledEditsResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint32 txnCount = 1; /** * required uint32 txnCount = 1; */ boolean hasTxnCount(); /** * required uint32 txnCount = 1; */ int getTxnCount(); // optional bytes editLog = 2; /** * optional bytes editLog = 2; */ boolean hasEditLog(); /** * optional bytes editLog = 2; */ com.google.protobuf.ByteString getEditLog(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetJournaledEditsResponseProto} */ public static final class GetJournaledEditsResponseProto extends com.google.protobuf.GeneratedMessage implements GetJournaledEditsResponseProtoOrBuilder { // Use GetJournaledEditsResponseProto.newBuilder() to construct. private GetJournaledEditsResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetJournaledEditsResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetJournaledEditsResponseProto defaultInstance; public static GetJournaledEditsResponseProto getDefaultInstance() { return defaultInstance; } public GetJournaledEditsResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetJournaledEditsResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; txnCount_ = input.readUInt32(); break; } case 18: { bitField0_ |= 0x00000002; editLog_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournaledEditsResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournaledEditsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public GetJournaledEditsResponseProto parsePartialFrom( 
com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new GetJournaledEditsResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint32 txnCount = 1; public static final int TXNCOUNT_FIELD_NUMBER = 1; private int txnCount_; /** * required uint32 txnCount = 1; */ public boolean hasTxnCount() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 txnCount = 1; */ public int getTxnCount() { return txnCount_; } // optional bytes editLog = 2; public static final int EDITLOG_FIELD_NUMBER = 2; private com.google.protobuf.ByteString editLog_; /** * optional bytes editLog = 2; */ public boolean hasEditLog() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bytes editLog = 2; */ public com.google.protobuf.ByteString getEditLog() { return editLog_; } private void initFields() { txnCount_ = 0; editLog_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasTxnCount()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt32(1, txnCount_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, editLog_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(1, txnCount_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, editLog_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto) obj; boolean result = true; result = result && (hasTxnCount() == other.hasTxnCount()); if (hasTxnCount()) { result = result && (getTxnCount() == other.getTxnCount()); } result = result && (hasEditLog() == other.hasEditLog()); if (hasEditLog()) { result = result && getEditLog() .equals(other.getEditLog()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasTxnCount()) { hash = (37 * hash) + TXNCOUNT_FIELD_NUMBER; hash = (53 * hash) + 
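/* Editor's note -- usage sketch, not part of the generated code: a consumer of
 * this response should check the optional editLog payload before decoding it;
 * "applyEdits" is a hypothetical helper and "data" an assumed byte[]:
 *
 *   GetJournaledEditsResponseProto resp = GetJournaledEditsResponseProto.parseFrom(data);
 *   if (resp.hasEditLog() && resp.getTxnCount() > 0) {
 *     applyEdits(resp.getTxnCount(), resp.getEditLog().toByteArray());
 *   }
 */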
getTxnCount(); } if (hasEditLog()) { hash = (37 * hash) + EDITLOG_FIELD_NUMBER; hash = (53 * hash) + getEditLog().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.GetJournaledEditsResponseProto} */ public static final class 
Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournaledEditsResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournaledEditsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); txnCount_ = 0; bitField0_ = (bitField0_ & ~0x00000001); editLog_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournaledEditsResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.txnCount_ = txnCount_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.editLog_ = editLog_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto.getDefaultInstance()) return this; if (other.hasTxnCount()) { setTxnCount(other.getTxnCount()); } if (other.hasEditLog()) { setEditLog(other.getEditLog()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasTxnCount()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint32 txnCount = 1; private int txnCount_ ; /** * required uint32 txnCount = 1; */ public boolean hasTxnCount() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 txnCount = 1; */ public int getTxnCount() { return txnCount_; } /** * required uint32 txnCount = 1; */ public Builder setTxnCount(int value) { bitField0_ |= 0x00000001; txnCount_ = value; onChanged(); return this; } /** * required uint32 txnCount = 1; */ public Builder clearTxnCount() { bitField0_ = (bitField0_ & ~0x00000001); txnCount_ = 0; onChanged(); return this; } // optional bytes editLog = 2; private com.google.protobuf.ByteString editLog_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes editLog = 2; */ public boolean hasEditLog() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bytes editLog = 2; */ public com.google.protobuf.ByteString getEditLog() { return editLog_; } /** * optional bytes editLog = 2; */ public Builder setEditLog(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; editLog_ = value; onChanged(); return this; } /** * optional bytes editLog = 2; */ public Builder clearEditLog() { bitField0_ = (bitField0_ & ~0x00000002); editLog_ = getDefaultInstance().getEditLog(); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetJournaledEditsResponseProto) } static { defaultInstance = new GetJournaledEditsResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetJournaledEditsResponseProto) } public interface PrepareRecoveryRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ boolean hasReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder(); // required uint64 segmentTxId = 2; /** * required uint64 segmentTxId = 2; */ boolean hasSegmentTxId(); /** * 
required uint64 segmentTxId = 2; */ long getSegmentTxId(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.PrepareRecoveryRequestProto} * *
   * <pre>
   **
   * prepareRecovery()
   * </pre>
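   *
   * Usage sketch (illustrative, not part of the generated source): a
   * prepare-recovery request could be assembled and round-tripped with the
   * generated builder API; the RequestInfoProto value is assumed to be
   * built by the caller, and the txid is a placeholder.
   * <pre>
   * PrepareRecoveryRequestProto req = PrepareRecoveryRequestProto.newBuilder()
   *     .setReqInfo(reqInfo)      // required; assumed built elsewhere
   *     .setSegmentTxId(1L)       // required; placeholder segment txid
   *     .build();                 // fails if a required field is unset
   * byte[] wire = req.toByteArray();
   * PrepareRecoveryRequestProto parsed =
   *     PrepareRecoveryRequestProto.parseFrom(wire);
   * </pre>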
*/ public static final class PrepareRecoveryRequestProto extends com.google.protobuf.GeneratedMessage implements PrepareRecoveryRequestProtoOrBuilder { // Use PrepareRecoveryRequestProto.newBuilder() to construct. private PrepareRecoveryRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private PrepareRecoveryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final PrepareRecoveryRequestProto defaultInstance; public static PrepareRecoveryRequestProto getDefaultInstance() { return defaultInstance; } public PrepareRecoveryRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private PrepareRecoveryRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = reqInfo_.toBuilder(); } reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(reqInfo_); reqInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; segmentTxId_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public PrepareRecoveryRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new PrepareRecoveryRequestProto(input, extensionRegistry); } }; @java.lang.Override public 
com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; public static final int REQINFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { return reqInfo_; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { return reqInfo_; } // required uint64 segmentTxId = 2; public static final int SEGMENTTXID_FIELD_NUMBER = 2; private long segmentTxId_; /** * required uint64 segmentTxId = 2; */ public boolean hasSegmentTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 segmentTxId = 2; */ public long getSegmentTxId() { return segmentTxId_; } private void initFields() { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); segmentTxId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasReqInfo()) { memoizedIsInitialized = 0; return false; } if (!hasSegmentTxId()) { memoizedIsInitialized = 0; return false; } if (!getReqInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, reqInfo_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, segmentTxId_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, reqInfo_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, segmentTxId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) obj; boolean result = true; result = result && (hasReqInfo() == other.hasReqInfo()); if (hasReqInfo()) { result = result && getReqInfo() .equals(other.getReqInfo()); } result = result && (hasSegmentTxId() == other.hasSegmentTxId()); if (hasSegmentTxId()) { result = result && (getSegmentTxId() == 
other.getSegmentTxId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasReqInfo()) { hash = (37 * hash) + REQINFO_FIELD_NUMBER; hash = (53 * hash) + getReqInfo().hashCode(); } if (hasSegmentTxId()) { hash = (37 * hash) + SEGMENTTXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getSegmentTxId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto prototype) 
{ return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.PrepareRecoveryRequestProto} * *
     * <pre>
     **
     * prepareRecovery()
     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getReqInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); segmentTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (reqInfoBuilder_ == null) { result.reqInfo_ = reqInfo_; } else { result.reqInfo_ = reqInfoBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.segmentTxId_ = segmentTxId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance()) return this; if (other.hasReqInfo()) { mergeReqInfo(other.getReqInfo()); } if (other.hasSegmentTxId()) { setSegmentTxId(other.getSegmentTxId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasReqInfo()) { return false; } if (!hasSegmentTxId()) { return false; } if (!getReqInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { if (reqInfoBuilder_ == null) { return reqInfo_; } else { return reqInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } reqInfo_ = value; onChanged(); } else { reqInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) { if (reqInfoBuilder_ == null) { reqInfo_ = builderForValue.build(); onChanged(); } else { reqInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder 
mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial(); } else { reqInfo_ = value; } onChanged(); } else { reqInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder clearReqInfo() { if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); onChanged(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getReqInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { if (reqInfoBuilder_ != null) { return reqInfoBuilder_.getMessageOrBuilder(); } else { return reqInfo_; } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> getReqInfoFieldBuilder() { if (reqInfoBuilder_ == null) { reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>( reqInfo_, getParentForChildren(), isClean()); reqInfo_ = null; } return reqInfoBuilder_; } // required uint64 segmentTxId = 2; private long segmentTxId_ ; /** * required uint64 segmentTxId = 2; */ public boolean hasSegmentTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 segmentTxId = 2; */ public long getSegmentTxId() { return segmentTxId_; } /** * required uint64 segmentTxId = 2; */ public Builder setSegmentTxId(long value) { bitField0_ |= 0x00000002; segmentTxId_ = value; onChanged(); return this; } /** * required uint64 segmentTxId = 2; */ public Builder clearSegmentTxId() { bitField0_ = (bitField0_ & ~0x00000002); segmentTxId_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.PrepareRecoveryRequestProto) } static { defaultInstance = new PrepareRecoveryRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.PrepareRecoveryRequestProto) } public interface PrepareRecoveryResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ boolean hasSegmentState(); /** * optional 
.hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState(); /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder(); // optional uint64 acceptedInEpoch = 2; /** * optional uint64 acceptedInEpoch = 2; */ boolean hasAcceptedInEpoch(); /** * optional uint64 acceptedInEpoch = 2; */ long getAcceptedInEpoch(); // required uint64 lastWriterEpoch = 3; /** * required uint64 lastWriterEpoch = 3; */ boolean hasLastWriterEpoch(); /** * required uint64 lastWriterEpoch = 3; */ long getLastWriterEpoch(); // optional uint64 lastCommittedTxId = 4; /** * optional uint64 lastCommittedTxId = 4; * *
     * <pre>
     * The highest committed txid that this logger has ever seen.
     * This may be higher than the data it actually has, in the case
     * that it was lagging before the old writer crashed.
     * </pre>
*/ boolean hasLastCommittedTxId(); /** * optional uint64 lastCommittedTxId = 4; * *
     * <pre>
     * The highest committed txid that this logger has ever seen.
     * This may be higher than the data it actually has, in the case
     * that it was lagging before the old writer crashed.
     * </pre>
*/ long getLastCommittedTxId(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.PrepareRecoveryResponseProto} */ public static final class PrepareRecoveryResponseProto extends com.google.protobuf.GeneratedMessage implements PrepareRecoveryResponseProtoOrBuilder { // Use PrepareRecoveryResponseProto.newBuilder() to construct. private PrepareRecoveryResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private PrepareRecoveryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final PrepareRecoveryResponseProto defaultInstance; public static PrepareRecoveryResponseProto getDefaultInstance() { return defaultInstance; } public PrepareRecoveryResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private PrepareRecoveryResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = segmentState_.toBuilder(); } segmentState_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(segmentState_); segmentState_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; acceptedInEpoch_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; lastWriterEpoch_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; lastCommittedTxId_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { 
public PrepareRecoveryResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new PrepareRecoveryResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; public static final int SEGMENTSTATE_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_; /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public boolean hasSegmentState() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() { return segmentState_; } /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() { return segmentState_; } // optional uint64 acceptedInEpoch = 2; public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2; private long acceptedInEpoch_; /** * optional uint64 acceptedInEpoch = 2; */ public boolean hasAcceptedInEpoch() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional uint64 acceptedInEpoch = 2; */ public long getAcceptedInEpoch() { return acceptedInEpoch_; } // required uint64 lastWriterEpoch = 3; public static final int LASTWRITEREPOCH_FIELD_NUMBER = 3; private long lastWriterEpoch_; /** * required uint64 lastWriterEpoch = 3; */ public boolean hasLastWriterEpoch() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 lastWriterEpoch = 3; */ public long getLastWriterEpoch() { return lastWriterEpoch_; } // optional uint64 lastCommittedTxId = 4; public static final int LASTCOMMITTEDTXID_FIELD_NUMBER = 4; private long lastCommittedTxId_; /** * optional uint64 lastCommittedTxId = 4; * *
     * <pre>
     * The highest committed txid that this logger has ever seen.
     * This may be higher than the data it actually has, in the case
     * that it was lagging before the old writer crashed.
     * </pre>
*/ public boolean hasLastCommittedTxId() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 lastCommittedTxId = 4; * *
     * <pre>
     * The highest committed txid that this logger has ever seen.
     * This may be higher than the data it actually has, in the case
     * that it was lagging before the old writer crashed.
     * </pre>
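     *
     * Usage sketch (illustrative only): since the field is optional, callers
     * are expected to test presence first; the getter returns 0 when unset.
     * <pre>
     * long committed = resp.hasLastCommittedTxId()
     *     ? resp.getLastCommittedTxId()
     *     : -1L;  // hypothetical sentinel for "not reported"
     * </pre>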
*/ public long getLastCommittedTxId() { return lastCommittedTxId_; } private void initFields() { segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); acceptedInEpoch_ = 0L; lastWriterEpoch_ = 0L; lastCommittedTxId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasLastWriterEpoch()) { memoizedIsInitialized = 0; return false; } if (hasSegmentState()) { if (!getSegmentState().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, segmentState_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, acceptedInEpoch_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, lastWriterEpoch_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt64(4, lastCommittedTxId_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, segmentState_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, acceptedInEpoch_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, lastWriterEpoch_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(4, lastCommittedTxId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) obj; boolean result = true; result = result && (hasSegmentState() == other.hasSegmentState()); if (hasSegmentState()) { result = result && getSegmentState() .equals(other.getSegmentState()); } result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch()); if (hasAcceptedInEpoch()) { result = result && (getAcceptedInEpoch() == other.getAcceptedInEpoch()); } result = result && (hasLastWriterEpoch() == other.hasLastWriterEpoch()); if (hasLastWriterEpoch()) { result = result && (getLastWriterEpoch() == other.getLastWriterEpoch()); } result = result && (hasLastCommittedTxId() == other.hasLastCommittedTxId()); if (hasLastCommittedTxId()) { result = result && (getLastCommittedTxId() == other.getLastCommittedTxId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if 
(memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSegmentState()) { hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER; hash = (53 * hash) + getSegmentState().hashCode(); } if (hasAcceptedInEpoch()) { hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER; hash = (53 * hash) + hashLong(getAcceptedInEpoch()); } if (hasLastWriterEpoch()) { hash = (37 * hash) + LASTWRITEREPOCH_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLastWriterEpoch()); } if (hasLastCommittedTxId()) { hash = (37 * hash) + LASTCOMMITTEDTXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLastCommittedTxId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder 
newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.PrepareRecoveryResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getSegmentStateFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (segmentStateBuilder_ == null) { segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); } else { segmentStateBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); acceptedInEpoch_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); lastWriterEpoch_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); lastCommittedTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = new 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (segmentStateBuilder_ == null) { result.segmentState_ = segmentState_; } else { result.segmentState_ = segmentStateBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.acceptedInEpoch_ = acceptedInEpoch_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.lastWriterEpoch_ = lastWriterEpoch_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.lastCommittedTxId_ = lastCommittedTxId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()) return this; if (other.hasSegmentState()) { mergeSegmentState(other.getSegmentState()); } if (other.hasAcceptedInEpoch()) { setAcceptedInEpoch(other.getAcceptedInEpoch()); } if (other.hasLastWriterEpoch()) { setLastWriterEpoch(other.getLastWriterEpoch()); } if (other.hasLastCommittedTxId()) { setLastCommittedTxId(other.getLastCommittedTxId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasLastWriterEpoch()) { return false; } if (hasSegmentState()) { if (!getSegmentState().isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_; /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public boolean hasSegmentState() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 
1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() { if (segmentStateBuilder_ == null) { return segmentState_; } else { return segmentStateBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) { if (segmentStateBuilder_ == null) { if (value == null) { throw new NullPointerException(); } segmentState_ = value; onChanged(); } else { segmentStateBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public Builder setSegmentState( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) { if (segmentStateBuilder_ == null) { segmentState_ = builderForValue.build(); onChanged(); } else { segmentStateBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) { if (segmentStateBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) { segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial(); } else { segmentState_ = value; } onChanged(); } else { segmentStateBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public Builder clearSegmentState() { if (segmentStateBuilder_ == null) { segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); onChanged(); } else { segmentStateBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() { bitField0_ |= 0x00000001; onChanged(); return getSegmentStateFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() { if (segmentStateBuilder_ != null) { return segmentStateBuilder_.getMessageOrBuilder(); } else { return segmentState_; } } /** * optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> getSegmentStateFieldBuilder() { if (segmentStateBuilder_ == null) { segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>( 
segmentState_, getParentForChildren(), isClean()); segmentState_ = null; } return segmentStateBuilder_; } // optional uint64 acceptedInEpoch = 2; private long acceptedInEpoch_ ; /** * optional uint64 acceptedInEpoch = 2; */ public boolean hasAcceptedInEpoch() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional uint64 acceptedInEpoch = 2; */ public long getAcceptedInEpoch() { return acceptedInEpoch_; } /** * optional uint64 acceptedInEpoch = 2; */ public Builder setAcceptedInEpoch(long value) { bitField0_ |= 0x00000002; acceptedInEpoch_ = value; onChanged(); return this; } /** * optional uint64 acceptedInEpoch = 2; */ public Builder clearAcceptedInEpoch() { bitField0_ = (bitField0_ & ~0x00000002); acceptedInEpoch_ = 0L; onChanged(); return this; } // required uint64 lastWriterEpoch = 3; private long lastWriterEpoch_ ; /** * required uint64 lastWriterEpoch = 3; */ public boolean hasLastWriterEpoch() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 lastWriterEpoch = 3; */ public long getLastWriterEpoch() { return lastWriterEpoch_; } /** * required uint64 lastWriterEpoch = 3; */ public Builder setLastWriterEpoch(long value) { bitField0_ |= 0x00000004; lastWriterEpoch_ = value; onChanged(); return this; } /** * required uint64 lastWriterEpoch = 3; */ public Builder clearLastWriterEpoch() { bitField0_ = (bitField0_ & ~0x00000004); lastWriterEpoch_ = 0L; onChanged(); return this; } // optional uint64 lastCommittedTxId = 4; private long lastCommittedTxId_ ; /** * optional uint64 lastCommittedTxId = 4; * *
       * <pre>
       * The highest committed txid that this logger has ever seen.
       * This may be higher than the data it actually has, in the case
       * that it was lagging before the old writer crashed.
       * </pre>
*/ public boolean hasLastCommittedTxId() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 lastCommittedTxId = 4; * *
       * <pre>
       * The highest committed txid that this logger has ever seen.
       * This may be higher than the data it actually has, in the case
       * that it was lagging before the old writer crashed.
       * </pre>
*/ public long getLastCommittedTxId() { return lastCommittedTxId_; } /** * optional uint64 lastCommittedTxId = 4; * *
       * <pre>
       * The highest committed txid that this logger has ever seen.
       * This may be higher than the data it actually has, in the case
       * that it was lagging before the old writer crashed.
       * </pre>
*/ public Builder setLastCommittedTxId(long value) { bitField0_ |= 0x00000008; lastCommittedTxId_ = value; onChanged(); return this; } /** * optional uint64 lastCommittedTxId = 4; * *
       * <pre>
       * The highest committed txid that this logger has ever seen.
       * This may be higher than the data it actually has, in the case
       * that it was lagging before the old writer crashed.
       * </pre>
*/ public Builder clearLastCommittedTxId() { bitField0_ = (bitField0_ & ~0x00000008); lastCommittedTxId_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.PrepareRecoveryResponseProto) } static { defaultInstance = new PrepareRecoveryResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.PrepareRecoveryResponseProto) } public interface AcceptRecoveryRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ boolean hasReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo(); /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder(); // required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
     * <pre>
     ** Details on the segment to recover
     * </pre>
*/ boolean hasStateToAccept(); /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
     * <pre>
     ** Details on the segment to recover
     * </pre>
*/ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept(); /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
     * <pre>
     ** Details on the segment to recover
     * </pre>
*/ org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder(); // required string fromURL = 3; /** * required string fromURL = 3; * *
     * <pre>
     ** The URL from which the log may be copied
     * </pre>
*/ boolean hasFromURL(); /** * required string fromURL = 3; * *
     * <pre>
     ** The URL from which the log may be copied
     * </pre>
*/ java.lang.String getFromURL(); /** * required string fromURL = 3; * *
     * <pre>
     ** The URL from which the log may be copied
     * </pre>
*/ com.google.protobuf.ByteString getFromURLBytes(); } /** * Protobuf type {@code hadoop.hdfs.qjournal.AcceptRecoveryRequestProto} * *
   * <pre>
   **
   * acceptRecovery()
   * </pre>
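   *
   * Usage sketch (illustrative, not part of the generated source): the
   * accept phase carries the chosen segment state plus a URL to copy the
   * log from; the values below are placeholders.
   * <pre>
   * AcceptRecoveryRequestProto accept = AcceptRecoveryRequestProto.newBuilder()
   *     .setReqInfo(reqInfo)              // required; assumed built elsewhere
   *     .setStateToAccept(chosenState)    // required SegmentStateProto
   *     .setFromURL("http://jn1.example.com:8480/getJournal")  // placeholder
   *     .build();
   * </pre>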
*/ public static final class AcceptRecoveryRequestProto extends com.google.protobuf.GeneratedMessage implements AcceptRecoveryRequestProtoOrBuilder { // Use AcceptRecoveryRequestProto.newBuilder() to construct. private AcceptRecoveryRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AcceptRecoveryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AcceptRecoveryRequestProto defaultInstance; public static AcceptRecoveryRequestProto getDefaultInstance() { return defaultInstance; } public AcceptRecoveryRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AcceptRecoveryRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = reqInfo_.toBuilder(); } reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(reqInfo_); reqInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = stateToAccept_.toBuilder(); } stateToAccept_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(stateToAccept_); stateToAccept_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } case 26: { bitField0_ |= 0x00000004; fromURL_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class, 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public AcceptRecoveryRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new AcceptRecoveryRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; public static final int REQINFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { return reqInfo_; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { return reqInfo_; } // required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; public static final int STATETOACCEPT_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_; /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
      * <pre>
      ** Details on the segment to recover 
      * </pre>
*/ public boolean hasStateToAccept() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
      * <pre>
      ** Details on the segment to recover 
      * </pre>
*/ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() { return stateToAccept_; } /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
      * <pre>
      ** Details on the segment to recover 
      * </pre>
*/ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() { return stateToAccept_; } // required string fromURL = 3; public static final int FROMURL_FIELD_NUMBER = 3; private java.lang.Object fromURL_; /** * required string fromURL = 3; * *
      * <pre>
      ** The URL from which the log may be copied 
      * </pre>
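      *
      * [Added note, not generated] A hypothetical example of such a URL,
      * pointing at a JournalNode's HTTP journal servlet (host, port and
      * query values invented for illustration):
      * <pre>
      * http://jn1.example.com:8480/getJournal?jid=myjournal&segmentTxId=100
      * </pre>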
*/ public boolean hasFromURL() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string fromURL = 3; * *
      * <pre>
      ** The URL from which the log may be copied 
      * </pre>
*/ public java.lang.String getFromURL() { java.lang.Object ref = fromURL_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { fromURL_ = s; } return s; } } /** * required string fromURL = 3; * *
      * <pre>
      ** The URL from which the log may be copied 
      * </pre>
*/ public com.google.protobuf.ByteString getFromURLBytes() { java.lang.Object ref = fromURL_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromURL_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private void initFields() { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); fromURL_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasReqInfo()) { memoizedIsInitialized = 0; return false; } if (!hasStateToAccept()) { memoizedIsInitialized = 0; return false; } if (!hasFromURL()) { memoizedIsInitialized = 0; return false; } if (!getReqInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } if (!getStateToAccept().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, reqInfo_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, stateToAccept_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getFromURLBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, reqInfo_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, stateToAccept_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, getFromURLBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) obj; boolean result = true; result = result && (hasReqInfo() == other.hasReqInfo()); if (hasReqInfo()) { result = result && getReqInfo() .equals(other.getReqInfo()); } result = result && (hasStateToAccept() == other.hasStateToAccept()); if (hasStateToAccept()) { result = result && getStateToAccept() .equals(other.getStateToAccept()); } result = result && (hasFromURL() == other.hasFromURL()); if (hasFromURL()) { result = result && getFromURL() .equals(other.getFromURL()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { 
return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasReqInfo()) { hash = (37 * hash) + REQINFO_FIELD_NUMBER; hash = (53 * hash) + getReqInfo().hashCode(); } if (hasStateToAccept()) { hash = (37 * hash) + STATETOACCEPT_FIELD_NUMBER; hash = (53 * hash) + getStateToAccept().hashCode(); } if (hasFromURL()) { hash = (37 * hash) + FROMURL_FIELD_NUMBER; hash = (53 * hash) + getFromURL().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } 
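  /* ----------------------------------------------------------------------
   * [Added note, not part of the generated source] A minimal sketch of
   * building and round-tripping this message with the generated API shown
   * above. All field values are hypothetical, and the RequestInfoProto /
   * SegmentStateProto builders are assumed to expose the setters named here.
   *
   *   AcceptRecoveryRequestProto req = AcceptRecoveryRequestProto.newBuilder()
   *       .setReqInfo(RequestInfoProto.newBuilder()
   *           .setJournalId(JournalIdProto.newBuilder().setIdentifier("myjournal"))
   *           .setEpoch(42)
   *           .setIpcSerialNumber(7))
   *       .setStateToAccept(SegmentStateProto.newBuilder()
   *           .setStartTxId(100L)
   *           .setEndTxId(200L)
   *           .setIsInProgress(false))
   *       .setFromURL("http://jn1.example.com:8480/getJournal?jid=myjournal&segmentTxId=100")
   *       .build();   // build() would throw if a required field were left unset
   *
   *   byte[] wire = req.toByteArray();
   *   AcceptRecoveryRequestProto parsed = AcceptRecoveryRequestProto.parseFrom(wire);
   * ---------------------------------------------------------------------- */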
@java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.AcceptRecoveryRequestProto} * *
      * <pre>
      **
      * acceptRecovery()
      * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getReqInfoFieldBuilder(); getStateToAcceptFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); if (stateToAcceptBuilder_ == null) { stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); } else { stateToAcceptBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); fromURL_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (reqInfoBuilder_ == null) { result.reqInfo_ = reqInfo_; } else { result.reqInfo_ = reqInfoBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ 
|= 0x00000002; } if (stateToAcceptBuilder_ == null) { result.stateToAccept_ = stateToAccept_; } else { result.stateToAccept_ = stateToAcceptBuilder_.build(); } if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.fromURL_ = fromURL_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance()) return this; if (other.hasReqInfo()) { mergeReqInfo(other.getReqInfo()); } if (other.hasStateToAccept()) { mergeStateToAccept(other.getStateToAccept()); } if (other.hasFromURL()) { bitField0_ |= 0x00000004; fromURL_ = other.fromURL_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasReqInfo()) { return false; } if (!hasStateToAccept()) { return false; } if (!hasFromURL()) { return false; } if (!getReqInfo().isInitialized()) { return false; } if (!getStateToAccept().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_; /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public boolean hasReqInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() { if (reqInfoBuilder_ == null) { return reqInfo_; } else { return reqInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } reqInfo_ = value; onChanged(); } else { 
reqInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder setReqInfo( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) { if (reqInfoBuilder_ == null) { reqInfo_ = builderForValue.build(); onChanged(); } else { reqInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial(); } else { reqInfo_ = value; } onChanged(); } else { reqInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public Builder clearReqInfo() { if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); onChanged(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getReqInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() { if (reqInfoBuilder_ != null) { return reqInfoBuilder_.getMessageOrBuilder(); } else { return reqInfo_; } } /** * required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> getReqInfoFieldBuilder() { if (reqInfoBuilder_ == null) { reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>( reqInfo_, getParentForChildren(), isClean()); reqInfo_ = null; } return reqInfoBuilder_; } // required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> stateToAcceptBuilder_; /** * required 
.hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
        * <pre>
        ** Details on the segment to recover 
        * </pre>
*/ public boolean hasStateToAccept() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
        * <pre>
        ** Details on the segment to recover 
        * </pre>
*/ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() { if (stateToAcceptBuilder_ == null) { return stateToAccept_; } else { return stateToAcceptBuilder_.getMessage(); } } /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
        * <pre>
        ** Details on the segment to recover 
        * </pre>
*/ public Builder setStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) { if (stateToAcceptBuilder_ == null) { if (value == null) { throw new NullPointerException(); } stateToAccept_ = value; onChanged(); } else { stateToAcceptBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
        * <pre>
        ** Details on the segment to recover 
        * </pre>
*/ public Builder setStateToAccept( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) { if (stateToAcceptBuilder_ == null) { stateToAccept_ = builderForValue.build(); onChanged(); } else { stateToAcceptBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
        * <pre>
        ** Details on the segment to recover 
        * </pre>
*/ public Builder mergeStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) { if (stateToAcceptBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && stateToAccept_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) { stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(stateToAccept_).mergeFrom(value).buildPartial(); } else { stateToAccept_ = value; } onChanged(); } else { stateToAcceptBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
        * <pre>
        ** Details on the segment to recover 
        * </pre>
*/ public Builder clearStateToAccept() { if (stateToAcceptBuilder_ == null) { stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); onChanged(); } else { stateToAcceptBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
        * <pre>
        ** Details on the segment to recover 
        * </pre>
*/ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getStateToAcceptBuilder() { bitField0_ |= 0x00000002; onChanged(); return getStateToAcceptFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
        * <pre>
        ** Details on the segment to recover 
        * </pre>
*/ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() { if (stateToAcceptBuilder_ != null) { return stateToAcceptBuilder_.getMessageOrBuilder(); } else { return stateToAccept_; } } /** * required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2; * *
        * <pre>
        ** Details on the segment to recover 
        * </pre>
*/ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> getStateToAcceptFieldBuilder() { if (stateToAcceptBuilder_ == null) { stateToAcceptBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>( stateToAccept_, getParentForChildren(), isClean()); stateToAccept_ = null; } return stateToAcceptBuilder_; } // required string fromURL = 3; private java.lang.Object fromURL_ = ""; /** * required string fromURL = 3; * *
        * <pre>
        ** The URL from which the log may be copied 
        * </pre>
*/ public boolean hasFromURL() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string fromURL = 3; * *
        * <pre>
        ** The URL from which the log may be copied 
        * </pre>
*/ public java.lang.String getFromURL() { java.lang.Object ref = fromURL_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); fromURL_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string fromURL = 3; * *
        * <pre>
        ** The URL from which the log may be copied 
        * </pre>
*/ public com.google.protobuf.ByteString getFromURLBytes() { java.lang.Object ref = fromURL_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromURL_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string fromURL = 3; * *
        * <pre>
        ** The URL from which the log may be copied 
        * </pre>
*/ public Builder setFromURL( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; fromURL_ = value; onChanged(); return this; } /** * required string fromURL = 3; * *
        * <pre>
        ** The URL from which the log may be copied 
        * </pre>
*/ public Builder clearFromURL() { bitField0_ = (bitField0_ & ~0x00000004); fromURL_ = getDefaultInstance().getFromURL(); onChanged(); return this; } /** * required string fromURL = 3; * *
        * <pre>
        ** The URL from which the log may be copied 
        * </pre>
*/ public Builder setFromURLBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; fromURL_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.AcceptRecoveryRequestProto) } static { defaultInstance = new AcceptRecoveryRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.AcceptRecoveryRequestProto) } public interface AcceptRecoveryResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.qjournal.AcceptRecoveryResponseProto} */ public static final class AcceptRecoveryResponseProto extends com.google.protobuf.GeneratedMessage implements AcceptRecoveryResponseProtoOrBuilder { // Use AcceptRecoveryResponseProto.newBuilder() to construct. private AcceptRecoveryResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AcceptRecoveryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AcceptRecoveryResponseProto defaultInstance; public static AcceptRecoveryResponseProto getDefaultInstance() { return defaultInstance; } public AcceptRecoveryResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AcceptRecoveryResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public AcceptRecoveryResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new 
AcceptRecoveryResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto 
parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.qjournal.AcceptRecoveryResponseProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_descriptor; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto getDefaultInstanceForType() { return 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto build() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto buildPartial() { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other) { if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.AcceptRecoveryResponseProto) } static { defaultInstance = new AcceptRecoveryResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.AcceptRecoveryResponseProto) } /** * Protobuf service {@code hadoop.hdfs.qjournal.QJournalProtocolService} * *
   * <pre>
   **
   * Protocol used to journal edits to a JournalNode.
   * See the request and response for details of rpc call.
   * </pre>
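   *
   * [Added note, not generated] A sketch of exporting a server-side
   * implementation through the reflective blocking wrapper defined below;
   * "impl" stands for a hypothetical object implementing BlockingInterface:
   * <pre>
   * com.google.protobuf.BlockingService svc =
   *     QJournalProtocolService.newReflectiveBlockingService(impl);
   * </pre>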
*/ public static abstract class QJournalProtocolService implements com.google.protobuf.Service { protected QJournalProtocolService() {} public interface Interface { /** * rpc isFormatted(.hadoop.hdfs.qjournal.IsFormattedRequestProto) returns (.hadoop.hdfs.qjournal.IsFormattedResponseProto); */ public abstract void isFormatted( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc discardSegments(.hadoop.hdfs.qjournal.DiscardSegmentsRequestProto) returns (.hadoop.hdfs.qjournal.DiscardSegmentsResponseProto); */ public abstract void discardSegments( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc getJournalCTime(.hadoop.hdfs.qjournal.GetJournalCTimeRequestProto) returns (.hadoop.hdfs.qjournal.GetJournalCTimeResponseProto); */ public abstract void getJournalCTime( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc doPreUpgrade(.hadoop.hdfs.qjournal.DoPreUpgradeRequestProto) returns (.hadoop.hdfs.qjournal.DoPreUpgradeResponseProto); */ public abstract void doPreUpgrade( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc doUpgrade(.hadoop.hdfs.qjournal.DoUpgradeRequestProto) returns (.hadoop.hdfs.qjournal.DoUpgradeResponseProto); */ public abstract void doUpgrade( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc doFinalize(.hadoop.hdfs.qjournal.DoFinalizeRequestProto) returns (.hadoop.hdfs.qjournal.DoFinalizeResponseProto); */ public abstract void doFinalize( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc canRollBack(.hadoop.hdfs.qjournal.CanRollBackRequestProto) returns (.hadoop.hdfs.qjournal.CanRollBackResponseProto); */ public abstract void canRollBack( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc doRollback(.hadoop.hdfs.qjournal.DoRollbackRequestProto) returns (.hadoop.hdfs.qjournal.DoRollbackResponseProto); */ public abstract void doRollback( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc getJournalState(.hadoop.hdfs.qjournal.GetJournalStateRequestProto) returns (.hadoop.hdfs.qjournal.GetJournalStateResponseProto); */ public abstract void getJournalState( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc newEpoch(.hadoop.hdfs.qjournal.NewEpochRequestProto) returns (.hadoop.hdfs.qjournal.NewEpochResponseProto); */ public abstract void newEpoch( com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc format(.hadoop.hdfs.qjournal.FormatRequestProto) returns (.hadoop.hdfs.qjournal.FormatResponseProto); */ public abstract void format( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc journal(.hadoop.hdfs.qjournal.JournalRequestProto) returns (.hadoop.hdfs.qjournal.JournalResponseProto); */ public abstract void journal( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc heartbeat(.hadoop.hdfs.qjournal.HeartbeatRequestProto) returns (.hadoop.hdfs.qjournal.HeartbeatResponseProto); */ public abstract void heartbeat( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc startLogSegment(.hadoop.hdfs.qjournal.StartLogSegmentRequestProto) returns (.hadoop.hdfs.qjournal.StartLogSegmentResponseProto); */ public abstract void startLogSegment( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc finalizeLogSegment(.hadoop.hdfs.qjournal.FinalizeLogSegmentRequestProto) returns (.hadoop.hdfs.qjournal.FinalizeLogSegmentResponseProto); */ public abstract void finalizeLogSegment( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc purgeLogs(.hadoop.hdfs.qjournal.PurgeLogsRequestProto) returns (.hadoop.hdfs.qjournal.PurgeLogsResponseProto); */ public abstract void purgeLogs( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc getEditLogManifest(.hadoop.hdfs.qjournal.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.qjournal.GetEditLogManifestResponseProto); */ public abstract void getEditLogManifest( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc getJournaledEdits(.hadoop.hdfs.qjournal.GetJournaledEditsRequestProto) returns (.hadoop.hdfs.qjournal.GetJournaledEditsResponseProto); */ public abstract void getJournaledEdits( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc prepareRecovery(.hadoop.hdfs.qjournal.PrepareRecoveryRequestProto) returns (.hadoop.hdfs.qjournal.PrepareRecoveryResponseProto); */ public abstract void prepareRecovery( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request, com.google.protobuf.RpcCallback done); /** * rpc acceptRecovery(.hadoop.hdfs.qjournal.AcceptRecoveryRequestProto) returns (.hadoop.hdfs.qjournal.AcceptRecoveryResponseProto); */ public abstract void acceptRecovery( com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request, com.google.protobuf.RpcCallback done); } public static com.google.protobuf.Service newReflectiveService( final Interface impl) { return new QJournalProtocolService() { @java.lang.Override public void isFormatted( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request, com.google.protobuf.RpcCallback done) { impl.isFormatted(controller, request, done); } @java.lang.Override public void discardSegments( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto request, com.google.protobuf.RpcCallback done) { impl.discardSegments(controller, request, done); } @java.lang.Override public void getJournalCTime( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto request, com.google.protobuf.RpcCallback done) { impl.getJournalCTime(controller, request, done); } @java.lang.Override public void doPreUpgrade( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto request, com.google.protobuf.RpcCallback done) { impl.doPreUpgrade(controller, request, done); } @java.lang.Override public void doUpgrade( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto request, com.google.protobuf.RpcCallback done) { impl.doUpgrade(controller, request, done); } @java.lang.Override public void doFinalize( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto request, com.google.protobuf.RpcCallback done) { impl.doFinalize(controller, request, done); } @java.lang.Override public void canRollBack( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto request, com.google.protobuf.RpcCallback done) { impl.canRollBack(controller, request, done); } @java.lang.Override public void doRollback( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto request, com.google.protobuf.RpcCallback done) { impl.doRollback(controller, request, done); } @java.lang.Override public void getJournalState( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request, com.google.protobuf.RpcCallback done) { impl.getJournalState(controller, request, done); } @java.lang.Override public void newEpoch( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request, com.google.protobuf.RpcCallback done) { impl.newEpoch(controller, request, done); } @java.lang.Override public void format( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request, com.google.protobuf.RpcCallback done) { impl.format(controller, request, done); } @java.lang.Override public void journal( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request, com.google.protobuf.RpcCallback done) { impl.journal(controller, request, 
done); } @java.lang.Override public void heartbeat( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request, com.google.protobuf.RpcCallback done) { impl.heartbeat(controller, request, done); } @java.lang.Override public void startLogSegment( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request, com.google.protobuf.RpcCallback done) { impl.startLogSegment(controller, request, done); } @java.lang.Override public void finalizeLogSegment( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request, com.google.protobuf.RpcCallback done) { impl.finalizeLogSegment(controller, request, done); } @java.lang.Override public void purgeLogs( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request, com.google.protobuf.RpcCallback done) { impl.purgeLogs(controller, request, done); } @java.lang.Override public void getEditLogManifest( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request, com.google.protobuf.RpcCallback done) { impl.getEditLogManifest(controller, request, done); } @java.lang.Override public void getJournaledEdits( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto request, com.google.protobuf.RpcCallback done) { impl.getJournaledEdits(controller, request, done); } @java.lang.Override public void prepareRecovery( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request, com.google.protobuf.RpcCallback done) { impl.prepareRecovery(controller, request, done); } @java.lang.Override public void acceptRecovery( com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request, com.google.protobuf.RpcCallback done) { impl.acceptRecovery(controller, request, done); } }; } public static com.google.protobuf.BlockingService newReflectiveBlockingService(final BlockingInterface impl) { return new com.google.protobuf.BlockingService() { public final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptorForType() { return getDescriptor(); } public final com.google.protobuf.Message callBlockingMethod( com.google.protobuf.Descriptors.MethodDescriptor method, com.google.protobuf.RpcController controller, com.google.protobuf.Message request) throws com.google.protobuf.ServiceException { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.callBlockingMethod() given method descriptor for " + "wrong service type."); } switch(method.getIndex()) { case 0: return impl.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request); case 1: return impl.discardSegments(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto)request); case 2: return impl.getJournalCTime(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto)request); case 3: return impl.doPreUpgrade(controller, 
(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto)request); case 4: return impl.doUpgrade(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto)request); case 5: return impl.doFinalize(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto)request); case 6: return impl.canRollBack(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto)request); case 7: return impl.doRollback(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto)request); case 8: return impl.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request); case 9: return impl.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request); case 10: return impl.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request); case 11: return impl.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request); case 12: return impl.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request); case 13: return impl.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request); case 14: return impl.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request); case 15: return impl.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request); case 16: return impl.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request); case 17: return impl.getJournaledEdits(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto)request); case 18: return impl.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request); case 19: return impl.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request); default: throw new java.lang.AssertionError("Can't get here."); } } public final com.google.protobuf.Message getRequestPrototype( com.google.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getRequestPrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance(); case 1: return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.getDefaultInstance(); case 2: return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.getDefaultInstance(); case 3: return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.getDefaultInstance(); case 4: return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.getDefaultInstance(); case 5: return 
                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.getDefaultInstance();
            case 6:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.getDefaultInstance();
            case 7:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.getDefaultInstance();
            case 8:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
            case 9:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
            case 10:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
            case 11:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
            case 12:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
            case 13:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
            case 14:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
            case 15:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
            case 16:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
            case 17:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto.getDefaultInstance();
            case 18:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
            case 19:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

        public final com.google.protobuf.Message
            getResponsePrototype(
            com.google.protobuf.Descriptors.MethodDescriptor method) {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.getResponsePrototype() given method " +
              "descriptor for wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
            case 1:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance();
            case 2:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance();
            case 3:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance();
            case 4:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance();
            case 5:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance();
            case 6:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance();
            case 7:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance();
            case 8:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
            case 9:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
            case 10:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
            case 11:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
            case 12:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
            case 13:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
            case 14:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
            case 15:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
            case 16:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
            case 17:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto.getDefaultInstance();
            case 18:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
            case 19:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

      };
    }

    /**
     * rpc isFormatted(.hadoop.hdfs.qjournal.IsFormattedRequestProto) returns (.hadoop.hdfs.qjournal.IsFormattedResponseProto);
     */
    public abstract void isFormatted(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);

    /**
     * rpc discardSegments(.hadoop.hdfs.qjournal.DiscardSegmentsRequestProto) returns (.hadoop.hdfs.qjournal.DiscardSegmentsResponseProto);
     */
    public abstract void discardSegments(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto> done);

    /**
     * rpc getJournalCTime(.hadoop.hdfs.qjournal.GetJournalCTimeRequestProto) returns (.hadoop.hdfs.qjournal.GetJournalCTimeResponseProto);
     */
    public abstract void getJournalCTime(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto> done);

    /**
     * rpc doPreUpgrade(.hadoop.hdfs.qjournal.DoPreUpgradeRequestProto) returns (.hadoop.hdfs.qjournal.DoPreUpgradeResponseProto);
     */
    public abstract void doPreUpgrade(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto> done);

    /**
     * rpc doUpgrade(.hadoop.hdfs.qjournal.DoUpgradeRequestProto) returns (.hadoop.hdfs.qjournal.DoUpgradeResponseProto);
     */
    public abstract void doUpgrade(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto> done);

    /**
     * rpc doFinalize(.hadoop.hdfs.qjournal.DoFinalizeRequestProto) returns (.hadoop.hdfs.qjournal.DoFinalizeResponseProto);
     */
    public abstract void doFinalize(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto> done);

    /**
     * rpc canRollBack(.hadoop.hdfs.qjournal.CanRollBackRequestProto) returns
     *     (.hadoop.hdfs.qjournal.CanRollBackResponseProto);
     */
    public abstract void canRollBack(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto> done);

    /**
     * rpc doRollback(.hadoop.hdfs.qjournal.DoRollbackRequestProto) returns (.hadoop.hdfs.qjournal.DoRollbackResponseProto);
     */
    public abstract void doRollback(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto> done);

    /**
     * rpc getJournalState(.hadoop.hdfs.qjournal.GetJournalStateRequestProto) returns (.hadoop.hdfs.qjournal.GetJournalStateResponseProto);
     */
    public abstract void getJournalState(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);

    /**
     * rpc newEpoch(.hadoop.hdfs.qjournal.NewEpochRequestProto) returns (.hadoop.hdfs.qjournal.NewEpochResponseProto);
     */
    public abstract void newEpoch(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);

    /**
     * rpc format(.hadoop.hdfs.qjournal.FormatRequestProto) returns (.hadoop.hdfs.qjournal.FormatResponseProto);
     */
    public abstract void format(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);

    /**
     * rpc journal(.hadoop.hdfs.qjournal.JournalRequestProto) returns (.hadoop.hdfs.qjournal.JournalResponseProto);
     */
    public abstract void journal(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);

    /**
     * rpc heartbeat(.hadoop.hdfs.qjournal.HeartbeatRequestProto) returns (.hadoop.hdfs.qjournal.HeartbeatResponseProto);
     */
    public abstract void heartbeat(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);

    /**
     * rpc startLogSegment(.hadoop.hdfs.qjournal.StartLogSegmentRequestProto) returns (.hadoop.hdfs.qjournal.StartLogSegmentResponseProto);
     */
    public abstract void startLogSegment(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);

    /**
     * rpc finalizeLogSegment(.hadoop.hdfs.qjournal.FinalizeLogSegmentRequestProto) returns (.hadoop.hdfs.qjournal.FinalizeLogSegmentResponseProto);
     */
    public abstract void finalizeLogSegment(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);

    /**
     * rpc purgeLogs(.hadoop.hdfs.qjournal.PurgeLogsRequestProto) returns (.hadoop.hdfs.qjournal.PurgeLogsResponseProto);
     */
    public abstract void purgeLogs(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);

    /**
     * rpc getEditLogManifest(.hadoop.hdfs.qjournal.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.qjournal.GetEditLogManifestResponseProto);
     */
    public abstract void
        getEditLogManifest(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);

    /**
     * rpc getJournaledEdits(.hadoop.hdfs.qjournal.GetJournaledEditsRequestProto) returns (.hadoop.hdfs.qjournal.GetJournaledEditsResponseProto);
     */
    public abstract void getJournaledEdits(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto> done);

    /**
     * rpc prepareRecovery(.hadoop.hdfs.qjournal.PrepareRecoveryRequestProto) returns (.hadoop.hdfs.qjournal.PrepareRecoveryResponseProto);
     */
    public abstract void prepareRecovery(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);

    /**
     * rpc acceptRecovery(.hadoop.hdfs.qjournal.AcceptRecoveryRequestProto) returns (.hadoop.hdfs.qjournal.AcceptRecoveryResponseProto);
     */
    public abstract void acceptRecovery(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);

    public static final com.google.protobuf.Descriptors.ServiceDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.getDescriptor().getServices().get(0);
    }

    public final com.google.protobuf.Descriptors.ServiceDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }

    public final void callMethod(
        com.google.protobuf.Descriptors.MethodDescriptor method,
        com.google.protobuf.RpcController controller,
        com.google.protobuf.Message request,
        com.google.protobuf.RpcCallback<
          com.google.protobuf.Message> done) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.callMethod() given method descriptor for wrong " +
          "service type.");
      }
      switch(method.getIndex()) {
        case 0:
          this.isFormatted(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto>specializeCallback(
              done));
          return;
        case 1:
          this.discardSegments(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto>specializeCallback(
              done));
          return;
        case 2:
          this.getJournalCTime(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto>specializeCallback(
              done));
          return;
        case 3:
          this.doPreUpgrade(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto>specializeCallback(
              done));
          return;
        case 4:
          this.doUpgrade(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto>specializeCallback(
              done));
          return;
        case 5:
          this.doFinalize(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto>specializeCallback(
              done));
          return;
        case 6:
          this.canRollBack(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto>specializeCallback(
              done));
          return;
        case 7:
          this.doRollback(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto>specializeCallback(
              done));
          return;
        case 8:
          this.getJournalState(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto>specializeCallback(
              done));
          return;
        case 9:
          this.newEpoch(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto>specializeCallback(
              done));
          return;
        case 10:
          this.format(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto>specializeCallback(
              done));
          return;
        case 11:
          this.journal(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto>specializeCallback(
              done));
          return;
        case 12:
          this.heartbeat(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto>specializeCallback(
              done));
          return;
        case 13:
          this.startLogSegment(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto>specializeCallback(
              done));
          return;
        case 14:
          this.finalizeLogSegment(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto>specializeCallback(
              done));
          return;
        case 15:
          this.purgeLogs(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto>specializeCallback(
              done));
          return;
        case 16:
          this.getEditLogManifest(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto>specializeCallback(
              done));
          return;
        case 17:
          this.getJournaledEdits(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto>specializeCallback(
              done));
          return;
        case 18:
          this.prepareRecovery(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto>specializeCallback(
              done));
          return;
        case 19:
          this.acceptRecovery(controller,
            (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto>specializeCallback(
              done));
          return;
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }

    public final com.google.protobuf.Message
        getRequestPrototype(
        com.google.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getRequestPrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
        case 1:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.getDefaultInstance();
        case 2:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.getDefaultInstance();
        case 3:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.getDefaultInstance();
        case 4:
          return
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.getDefaultInstance();
        case 5:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.getDefaultInstance();
        case 6:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.getDefaultInstance();
        case 7:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.getDefaultInstance();
        case 8:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
        case 9:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
        case 10:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
        case 11:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
        case 12:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
        case 13:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
        case 14:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
        case 15:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
        case 16:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
        case 17:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto.getDefaultInstance();
        case 18:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
        case 19:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }

    public final com.google.protobuf.Message
        getResponsePrototype(
        com.google.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getResponsePrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
        case 1:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance();
        case 2:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance();
        case 3:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance();
        case 4:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance();
        case 5:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance();
        case 6:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance();
        case 7:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance();
        case 8:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
        case 9:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
        case 10:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
        case 11:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
        case 12:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
        case 13:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
        case 14:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
        case 15:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
        case 16:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
        case 17:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto.getDefaultInstance();
        case 18:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
        case 19:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }

    public static Stub newStub(
        com.google.protobuf.RpcChannel channel) {
      return new Stub(channel);
    }

    public static final class Stub extends org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.QJournalProtocolService implements Interface {
      private Stub(com.google.protobuf.RpcChannel channel) {
        this.channel = channel;
      }

      private final com.google.protobuf.RpcChannel channel;

      public com.google.protobuf.RpcChannel getChannel() {
        return channel;
      }

      public void isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()));
      }

      public void discardSegments(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(1),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance()));
      }

      public void getJournalCTime(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(2),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance()));
      }

      public void doPreUpgrade(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(3),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance()));
      }

      public void doUpgrade(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(4),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance()));
      }

      public void doFinalize(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(5),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance()));
      }

      public void canRollBack(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(6),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance()));
      }

      public void doRollback(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(7),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance()));
      }

      public void getJournalState(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(8),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()));
      }

      public void newEpoch(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(9),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()));
      }

      public void format(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(10),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()));
      }

      public void journal(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(11),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()));
      }

      public void heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(12),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()));
      }

      public void startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(13),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()));
      }

      public void finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(14),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()));
      }

      public void purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(15),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()));
      }

      public void getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(16),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()));
      }

      public void getJournaledEdits(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(17),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto.getDefaultInstance()));
      }

      public void prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(18),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()));
      }

      public void acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(19),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()));
      }
    }

    public static BlockingInterface newBlockingStub(
        com.google.protobuf.BlockingRpcChannel channel) {
      return new BlockingStub(channel);
    }

    public interface BlockingInterface {
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto discardSegments(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto getJournalCTime(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto doPreUpgrade(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto doUpgrade(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto doFinalize(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto canRollBack(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto
          doRollback(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto getJournaledEdits(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException;

      public
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException;
    }

    private static final class BlockingStub implements BlockingInterface {
      private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
        this.channel = channel;
      }

      private final com.google.protobuf.BlockingRpcChannel channel;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto discardSegments(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(1),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto getJournalCTime(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(2),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto doPreUpgrade(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(3),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto doUpgrade(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(4),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance());
      }

      public
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto doFinalize(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(5),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto canRollBack(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(6),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto doRollback(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(7),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(8),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(9),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(10),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance());
      }

      public
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(11),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(12),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(13),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(14),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(15),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(16),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto getJournaledEdits(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(17),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(18),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(19),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance());
      }
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.QJournalProtocolService)
  }

  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_JournalIdProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_JournalIdProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_RequestInfoProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_RequestInfoProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_SegmentStateProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_SegmentStateProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_JournalRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_JournalRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_JournalResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_JournalResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_descriptor;
  private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_fieldAccessorTable; 
private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_FormatRequestProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_FormatRequestProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_FormatResponseProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_FormatResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_GetJournaledEditsRequestProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_GetJournaledEditsRequestProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_GetJournaledEditsResponseProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_GetJournaledEditsResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor 
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_fieldAccessorTable;

  public static com.google.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static com.google.protobuf.Descriptors.FileDescriptor
      descriptor;
  static {
    java.lang.String[] descriptorData = {
      "\n\026QJournalProtocol.proto\022\024hadoop.hdfs.qj" +
      "ournal\032\nhdfs.proto\032\020HdfsServer.proto\"$\n\016" +
      "JournalIdProto\022\022\n\nidentifier\030\001 \002(\t\"\212\001\n\020R" +
      "equestInfoProto\0227\n\tjournalId\030\001 \002(\0132$.had" +
      "oop.hdfs.qjournal.JournalIdProto\022\r\n\005epoc" +
      "h\030\002 \002(\004\022\027\n\017ipcSerialNumber\030\003 \002(\004\022\025\n\rcomm" +
      "ittedTxId\030\004 \001(\004\"M\n\021SegmentStateProto\022\021\n\t" +
      "startTxId\030\001 \002(\004\022\017\n\007endTxId\030\002 \002(\004\022\024\n\014isIn" +
      "Progress\030\003 \002(\010\"t\n\032PersistedRecoveryPaxos" +
      "Data\022=\n\014segmentState\030\001 \002(\0132\'.hadoop.hdfs",
      ".qjournal.SegmentStateProto\022\027\n\017acceptedI" +
      "nEpoch\030\002 \002(\004\"\232\001\n\023JournalRequestProto\0227\n\007" +
      "reqInfo\030\001 \002(\0132&.hadoop.hdfs.qjournal.Req" +
      "uestInfoProto\022\022\n\nfirstTxnId\030\002 \002(\004\022\017\n\007num" +
      "Txns\030\003 \002(\r\022\017\n\007records\030\004 \002(\014\022\024\n\014segmentTx" +
      "nId\030\005 \002(\004\"\026\n\024JournalResponseProto\"P\n\025Hea" +
      "rtbeatRequestProto\0227\n\007reqInfo\030\001 \002(\0132&.ha" +
      "doop.hdfs.qjournal.RequestInfoProto\"\030\n\026H" +
      "eartbeatResponseProto\"{\n\033StartLogSegment" +
      "RequestProto\0227\n\007reqInfo\030\001 \002(\0132&.hadoop.h",
      "dfs.qjournal.RequestInfoProto\022\014\n\004txid\030\002 " +
      "\002(\004\022\025\n\rlayoutVersion\030\003 \001(\021\"\036\n\034StartLogSe" +
      "gmentResponseProto\"}\n\036FinalizeLogSegment" +
      "RequestProto\0227\n\007reqInfo\030\001 \002(\0132&.hadoop.h" +
      "dfs.qjournal.RequestInfoProto\022\021\n\tstartTx" +
      "Id\030\002 \002(\004\022\017\n\007endTxId\030\003 \002(\004\"!\n\037FinalizeLog" +
      "SegmentResponseProto\"g\n\025PurgeLogsRequest" +
      "Proto\0227\n\007reqInfo\030\001 \002(\0132&.hadoop.hdfs.qjo" +
      "urnal.RequestInfoProto\022\025\n\rminTxIdToKeep\030" +
      "\002 \002(\004\"\030\n\026PurgeLogsResponseProto\"L\n\027IsFor",
      "mattedRequestProto\0221\n\003jid\030\001 \002(\0132$.hadoop" +
      ".hdfs.qjournal.JournalIdProto\"/\n\030IsForma" +
      "ttedResponseProto\022\023\n\013isFormatted\030\001 \002(\010\"c" +
      "\n\033DiscardSegmentsRequestProto\0221\n\003jid\030\001 \002" +
      "(\0132$.hadoop.hdfs.qjournal.JournalIdProto" +
      "\022\021\n\tstartTxId\030\002 \002(\004\"\036\n\034DiscardSegmentsRe" +
      "sponseProto\"P\n\033GetJournalCTimeRequestPro" +
      "to\0221\n\003jid\030\001 \002(\0132$.hadoop.hdfs.qjournal.J" +
      "ournalIdProto\"3\n\034GetJournalCTimeResponse" +
      "Proto\022\023\n\013resultCTime\030\001 \002(\003\"M\n\030DoPreUpgra",
      "deRequestProto\0221\n\003jid\030\001 \002(\0132$.hadoop.hdf" +
      "s.qjournal.JournalIdProto\"\033\n\031DoPreUpgrad" +
      "eResponseProto\"x\n\025DoUpgradeRequestProto\022" +
      "1\n\003jid\030\001 \002(\0132$.hadoop.hdfs.qjournal.Jour" +
      "nalIdProto\022,\n\005sInfo\030\002 \002(\0132\035.hadoop.hdfs." +
      "StorageInfoProto\"\030\n\026DoUpgradeResponsePro" +
      "to\"K\n\026DoFinalizeRequestProto\0221\n\003jid\030\001 \002(" +
      "\0132$.hadoop.hdfs.qjournal.JournalIdProto\"" +
      "\031\n\027DoFinalizeResponseProto\"\315\001\n\027CanRollBa" +
      "ckRequestProto\0221\n\003jid\030\001 \002(\0132$.hadoop.hdf",
      "s.qjournal.JournalIdProto\022.\n\007storage\030\002 \002" +
      "(\0132\035.hadoop.hdfs.StorageInfoProto\0222\n\013pre" +
      "vStorage\030\003 \002(\0132\035.hadoop.hdfs.StorageInfo" +
      "Proto\022\033\n\023targetLayoutVersion\030\004 \002(\005\"/\n\030Ca" +
      "nRollBackResponseProto\022\023\n\013canRollBack\030\001 " +
      "\002(\010\"K\n\026DoRollbackRequestProto\0221\n\003jid\030\001 \002" +
      "(\0132$.hadoop.hdfs.qjournal.JournalIdProto" +
      "\"\031\n\027DoRollbackResponseProto\"P\n\033GetJourna" +
      "lStateRequestProto\0221\n\003jid\030\001 \002(\0132$.hadoop" +
      ".hdfs.qjournal.JournalIdProto\"\\\n\034GetJour",
      "nalStateResponseProto\022\031\n\021lastPromisedEpo" +
      "ch\030\001 \002(\004\022\020\n\010httpPort\030\002 \002(\r\022\017\n\007fromURL\030\003 " +
      "\001(\t\"x\n\022FormatRequestProto\0221\n\003jid\030\001 \002(\0132$" +
      ".hadoop.hdfs.qjournal.JournalIdProto\022/\n\006" +
      "nsInfo\030\002 \002(\0132\037.hadoop.hdfs.NamespaceInfo" +
      "Proto\"\025\n\023FormatResponseProto\"\211\001\n\024NewEpoc" +
      "hRequestProto\0221\n\003jid\030\001 \002(\0132$.hadoop.hdfs" +
      ".qjournal.JournalIdProto\022/\n\006nsInfo\030\002 \002(\013" +
      "2\037.hadoop.hdfs.NamespaceInfoProto\022\r\n\005epo" +
      "ch\030\003 \002(\004\"0\n\025NewEpochResponseProto\022\027\n\017las",
      "tSegmentTxId\030\001 \001(\004\"\203\001\n\036GetEditLogManifes" +
      "tRequestProto\0221\n\003jid\030\001 \002(\0132$.hadoop.hdfs" +
      ".qjournal.JournalIdProto\022\021\n\tsinceTxId\030\002 " +
      "\002(\004\022\033\n\014inProgressOk\030\004 \001(\010:\005false\"\177\n\037GetE" +
      "ditLogManifestResponseProto\0229\n\010manifest\030" +
      "\001 \002(\0132\'.hadoop.hdfs.RemoteEditLogManifes" +
      "tProto\022\020\n\010httpPort\030\002 \002(\r\022\017\n\007fromURL\030\003 \001(" +
      "\t\"v\n\035GetJournaledEditsRequestProto\0221\n\003ji" +
      "d\030\001 \002(\0132$.hadoop.hdfs.qjournal.JournalId" +
      "Proto\022\021\n\tsinceTxId\030\002 \002(\004\022\017\n\007maxTxns\030\003 \002(",
      "\r\"C\n\036GetJournaledEditsResponseProto\022\020\n\010t" +
      "xnCount\030\001 \002(\r\022\017\n\007editLog\030\002 \001(\014\"k\n\033Prepar" +
      "eRecoveryRequestProto\0227\n\007reqInfo\030\001 \002(\0132&" +
      ".hadoop.hdfs.qjournal.RequestInfoProto\022\023" +
      "\n\013segmentTxId\030\002 \002(\004\"\252\001\n\034PrepareRecoveryR" +
      "esponseProto\022=\n\014segmentState\030\001 \001(\0132\'.had" +
      "oop.hdfs.qjournal.SegmentStateProto\022\027\n\017a" +
      "cceptedInEpoch\030\002 \001(\004\022\027\n\017lastWriterEpoch\030" +
      "\003 \002(\004\022\031\n\021lastCommittedTxId\030\004 \001(\004\"\246\001\n\032Acc" +
      "eptRecoveryRequestProto\0227\n\007reqInfo\030\001 \002(\013",
      "2&.hadoop.hdfs.qjournal.RequestInfoProto" +
      "\022>\n\rstateToAccept\030\002 \002(\0132\'.hadoop.hdfs.qj" +
      "ournal.SegmentStateProto\022\017\n\007fromURL\030\003 \002(" +
      "\t\"\035\n\033AcceptRecoveryResponseProto2\373\021\n\027QJo" +
      "urnalProtocolService\022l\n\013isFormatted\022-.ha" +
      "doop.hdfs.qjournal.IsFormattedRequestPro" +
      "to\032..hadoop.hdfs.qjournal.IsFormattedRes" +
      "ponseProto\022x\n\017discardSegments\0221.hadoop.h" +
      "dfs.qjournal.DiscardSegmentsRequestProto" +
      "\0322.hadoop.hdfs.qjournal.DiscardSegmentsR",
      "esponseProto\022x\n\017getJournalCTime\0221.hadoop" +
      ".hdfs.qjournal.GetJournalCTimeRequestPro" +
      "to\0322.hadoop.hdfs.qjournal.GetJournalCTim" +
      "eResponseProto\022o\n\014doPreUpgrade\022..hadoop." +
      "hdfs.qjournal.DoPreUpgradeRequestProto\032/" +
      ".hadoop.hdfs.qjournal.DoPreUpgradeRespon" +
      "seProto\022f\n\tdoUpgrade\022+.hadoop.hdfs.qjour" +
      "nal.DoUpgradeRequestProto\032,.hadoop.hdfs." +
      "qjournal.DoUpgradeResponseProto\022i\n\ndoFin" +
      "alize\022,.hadoop.hdfs.qjournal.DoFinalizeR",
      "equestProto\032-.hadoop.hdfs.qjournal.DoFin" +
      "alizeResponseProto\022l\n\013canRollBack\022-.hado" +
      "op.hdfs.qjournal.CanRollBackRequestProto" +
      "\032..hadoop.hdfs.qjournal.CanRollBackRespo" +
      "nseProto\022i\n\ndoRollback\022,.hadoop.hdfs.qjo" +
      "urnal.DoRollbackRequestProto\032-.hadoop.hd" +
      "fs.qjournal.DoRollbackResponseProto\022x\n\017g" +
      "etJournalState\0221.hadoop.hdfs.qjournal.Ge" +
      "tJournalStateRequestProto\0322.hadoop.hdfs." +
      "qjournal.GetJournalStateResponseProto\022c\n",
      "\010newEpoch\022*.hadoop.hdfs.qjournal.NewEpoc" +
      "hRequestProto\032+.hadoop.hdfs.qjournal.New" +
      "EpochResponseProto\022]\n\006format\022(.hadoop.hd" +
      "fs.qjournal.FormatRequestProto\032).hadoop." +
      "hdfs.qjournal.FormatResponseProto\022`\n\007jou" +
      "rnal\022).hadoop.hdfs.qjournal.JournalReque" +
      "stProto\032*.hadoop.hdfs.qjournal.JournalRe" +
      "sponseProto\022f\n\theartbeat\022+.hadoop.hdfs.q" +
      "journal.HeartbeatRequestProto\032,.hadoop.h" +
      "dfs.qjournal.HeartbeatResponseProto\022x\n\017s",
      "tartLogSegment\0221.hadoop.hdfs.qjournal.St" +
      "artLogSegmentRequestProto\0322.hadoop.hdfs." +
      "qjournal.StartLogSegmentResponseProto\022\201\001" +
      "\n\022finalizeLogSegment\0224.hadoop.hdfs.qjour" +
      "nal.FinalizeLogSegmentRequestProto\0325.had" +
      "oop.hdfs.qjournal.FinalizeLogSegmentResp" +
      "onseProto\022f\n\tpurgeLogs\022+.hadoop.hdfs.qjo" +
      "urnal.PurgeLogsRequestProto\032,.hadoop.hdf" +
      "s.qjournal.PurgeLogsResponseProto\022\201\001\n\022ge" +
      "tEditLogManifest\0224.hadoop.hdfs.qjournal.",
      "GetEditLogManifestRequestProto\0325.hadoop." +
      "hdfs.qjournal.GetEditLogManifestResponse" +
      "Proto\022~\n\021getJournaledEdits\0223.hadoop.hdfs" +
      ".qjournal.GetJournaledEditsRequestProto\032" +
      "4.hadoop.hdfs.qjournal.GetJournaledEdits" +
      "ResponseProto\022x\n\017prepareRecovery\0221.hadoo" +
      "p.hdfs.qjournal.PrepareRecoveryRequestPr" +
      "oto\0322.hadoop.hdfs.qjournal.PrepareRecove" +
      "ryResponseProto\022u\n\016acceptRecovery\0220.hado" +
      "op.hdfs.qjournal.AcceptRecoveryRequestPr",
      "oto\0321.hadoop.hdfs.qjournal.AcceptRecover" +
      "yResponseProtoBH\n(org.apache.hadoop.hdfs" +
      ".qjournal.protocolB\026QJournalProtocolProt" +
      "os\210\001\001\240\001\001"
    };
    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
        new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
          public com.google.protobuf.ExtensionRegistry assignDescriptors(
              com.google.protobuf.Descriptors.FileDescriptor root) {
            descriptor = root;
            internal_static_hadoop_hdfs_qjournal_JournalIdProto_descriptor = getDescriptor().getMessageTypes().get(0);
            internal_static_hadoop_hdfs_qjournal_JournalIdProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_JournalIdProto_descriptor, new java.lang.String[] { "Identifier", });
            internal_static_hadoop_hdfs_qjournal_RequestInfoProto_descriptor = getDescriptor().getMessageTypes().get(1);
            internal_static_hadoop_hdfs_qjournal_RequestInfoProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_RequestInfoProto_descriptor, new java.lang.String[] { "JournalId", "Epoch", "IpcSerialNumber", "CommittedTxId", });
            internal_static_hadoop_hdfs_qjournal_SegmentStateProto_descriptor = getDescriptor().getMessageTypes().get(2);
            internal_static_hadoop_hdfs_qjournal_SegmentStateProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_SegmentStateProto_descriptor, new java.lang.String[] { "StartTxId", "EndTxId", "IsInProgress", });
            internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_descriptor = getDescriptor().getMessageTypes().get(3);
            internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_descriptor, new java.lang.String[] { "SegmentState", "AcceptedInEpoch", });
            internal_static_hadoop_hdfs_qjournal_JournalRequestProto_descriptor = getDescriptor().getMessageTypes().get(4);
            internal_static_hadoop_hdfs_qjournal_JournalRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_JournalRequestProto_descriptor, new java.lang.String[] { "ReqInfo", "FirstTxnId", "NumTxns", "Records", "SegmentTxnId", });
            internal_static_hadoop_hdfs_qjournal_JournalResponseProto_descriptor = getDescriptor().getMessageTypes().get(5);
            internal_static_hadoop_hdfs_qjournal_JournalResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_JournalResponseProto_descriptor, new java.lang.String[] { });
            internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_descriptor = getDescriptor().getMessageTypes().get(6);
            internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_descriptor, new java.lang.String[] { "ReqInfo", });
            internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_descriptor = getDescriptor().getMessageTypes().get(7);
            internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_descriptor, new java.lang.String[] { });
            internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_descriptor = getDescriptor().getMessageTypes().get(8);
            internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_descriptor, new java.lang.String[] { "ReqInfo", "Txid", "LayoutVersion", });
            internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_descriptor = getDescriptor().getMessageTypes().get(9);
            internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_descriptor, new java.lang.String[] { });
            internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_descriptor = getDescriptor().getMessageTypes().get(10);
            internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_descriptor, new java.lang.String[] { "ReqInfo", "StartTxId", "EndTxId", });
            internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_descriptor = getDescriptor().getMessageTypes().get(11);
            internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_descriptor, new java.lang.String[] { });
            internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_descriptor = getDescriptor().getMessageTypes().get(12);
            internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_descriptor, new java.lang.String[] { "ReqInfo", "MinTxIdToKeep", });
            internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_descriptor = getDescriptor().getMessageTypes().get(13);
            internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_descriptor, new java.lang.String[] { });
            internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_descriptor = getDescriptor().getMessageTypes().get(14);
            internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_descriptor, new java.lang.String[] { "Jid", });
            internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_descriptor = getDescriptor().getMessageTypes().get(15);
            internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_descriptor, new java.lang.String[] { "IsFormatted", });
            internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_descriptor = getDescriptor().getMessageTypes().get(16);
            internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_descriptor, new java.lang.String[] { "Jid", "StartTxId", });
            internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_descriptor = getDescriptor().getMessageTypes().get(17);
            internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_descriptor, new java.lang.String[] { });
            internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_descriptor = getDescriptor().getMessageTypes().get(18);
            internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_descriptor, new java.lang.String[] { "Jid", });
            internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_descriptor = getDescriptor().getMessageTypes().get(19);
            internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_descriptor, new java.lang.String[] { "ResultCTime", });
            internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_descriptor = getDescriptor().getMessageTypes().get(20);
            internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_descriptor, new java.lang.String[] { "Jid", });
            internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_descriptor = getDescriptor().getMessageTypes().get(21);
            internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_descriptor, new java.lang.String[] { });
            internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_descriptor = getDescriptor().getMessageTypes().get(22);
            internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_descriptor, new java.lang.String[] { "Jid", "SInfo", });
            internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_descriptor = getDescriptor().getMessageTypes().get(23);
            internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_descriptor, new java.lang.String[] { });
            internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_descriptor = getDescriptor().getMessageTypes().get(24);
            internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_descriptor, new java.lang.String[] { "Jid", });
            internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_descriptor = getDescriptor().getMessageTypes().get(25);
            internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_descriptor, new java.lang.String[] { });
            internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_descriptor = getDescriptor().getMessageTypes().get(26);
            internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_descriptor, new java.lang.String[] { "Jid", "Storage", "PrevStorage", "TargetLayoutVersion", });
            internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_descriptor = getDescriptor().getMessageTypes().get(27);
            internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_descriptor, new java.lang.String[] { "CanRollBack", });
            internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_descriptor = getDescriptor().getMessageTypes().get(28);
            internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_descriptor, new java.lang.String[] { "Jid", });
            internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_descriptor = getDescriptor().getMessageTypes().get(29);
            internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_descriptor, new java.lang.String[] { });
            internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_descriptor = getDescriptor().getMessageTypes().get(30);
            internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_descriptor, new java.lang.String[] { "Jid", });
            internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_descriptor = getDescriptor().getMessageTypes().get(31);
            internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_descriptor, new java.lang.String[] { "LastPromisedEpoch", "HttpPort", "FromURL", });
            internal_static_hadoop_hdfs_qjournal_FormatRequestProto_descriptor = getDescriptor().getMessageTypes().get(32);
            internal_static_hadoop_hdfs_qjournal_FormatRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_FormatRequestProto_descriptor, new java.lang.String[] { "Jid", "NsInfo", });
            internal_static_hadoop_hdfs_qjournal_FormatResponseProto_descriptor = getDescriptor().getMessageTypes().get(33);
            internal_static_hadoop_hdfs_qjournal_FormatResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_FormatResponseProto_descriptor, new java.lang.String[] { });
            internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_descriptor = getDescriptor().getMessageTypes().get(34);
            internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_descriptor, new java.lang.String[] { "Jid", "NsInfo", "Epoch", });
            internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_descriptor = getDescriptor().getMessageTypes().get(35);
            internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_descriptor, new java.lang.String[] { "LastSegmentTxId", });
            internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_descriptor = getDescriptor().getMessageTypes().get(36);
            internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_descriptor, new java.lang.String[] { "Jid", "SinceTxId", "InProgressOk", });
            internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_descriptor = getDescriptor().getMessageTypes().get(37);
            internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_descriptor, new java.lang.String[] { "Manifest", "HttpPort", "FromURL", });
            internal_static_hadoop_hdfs_qjournal_GetJournaledEditsRequestProto_descriptor = getDescriptor().getMessageTypes().get(38);
            internal_static_hadoop_hdfs_qjournal_GetJournaledEditsRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_GetJournaledEditsRequestProto_descriptor, new java.lang.String[] { "Jid", "SinceTxId", "MaxTxns", });
            internal_static_hadoop_hdfs_qjournal_GetJournaledEditsResponseProto_descriptor = getDescriptor().getMessageTypes().get(39);
            internal_static_hadoop_hdfs_qjournal_GetJournaledEditsResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_GetJournaledEditsResponseProto_descriptor, new java.lang.String[] { "TxnCount", "EditLog", });
            internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_descriptor = getDescriptor().getMessageTypes().get(40);
            internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_descriptor, new java.lang.String[] { "ReqInfo", "SegmentTxId", });
            internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_descriptor = getDescriptor().getMessageTypes().get(41);
            internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_descriptor, new java.lang.String[] { "SegmentState", "AcceptedInEpoch", "LastWriterEpoch", "LastCommittedTxId", });
            internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_descriptor = getDescriptor().getMessageTypes().get(42);
            internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_descriptor, new java.lang.String[] { "ReqInfo", "StateToAccept", "FromURL", });
            internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_descriptor = getDescriptor().getMessageTypes().get(43);
            internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_descriptor, new java.lang.String[] { });
            return null;
          }
        };
    com.google.protobuf.Descriptors.FileDescriptor
        .internalBuildGeneratedFileFrom(descriptorData,
            new com.google.protobuf.Descriptors.FileDescriptor[] {
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
              org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.getDescriptor(),
            }, assigner);
  }

  // @@protoc_insertion_point(outer_class_scope)
}
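
// Descriptor sketch (illustrative, not part of the generated file): the static
// initializer above passes descriptorData to internalBuildGeneratedFileFrom, and
// the assigner caches each message's Descriptor and FieldAccessorTable once the
// FileDescriptor is built. The schema can then be inspected reflectively:
//
//   com.google.protobuf.Descriptors.FileDescriptor fd =
//       QJournalProtocolProtos.getDescriptor();
//   com.google.protobuf.Descriptors.ServiceDescriptor svc = fd.getServices().get(0);
//   // Methods appear in declaration order; index 17 is getJournaledEdits,
//   // matching getDescriptor().getMethods().get(17) in the blocking stub above.
//   for (com.google.protobuf.Descriptors.MethodDescriptor m : svc.getMethods()) {
//     System.out.println(m.getName() + ": " + m.getInputType().getFullName()
//         + " -> " + m.getOutputType().getFullName());
//   }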



