// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: inotify.proto

package org.apache.hadoop.hdfs.protocol.proto;

public final class InotifyProtos {
  private InotifyProtos() {}
  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
  }

  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
    registerAllExtensions(
        (org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
  }
  /**
   * Protobuf enum {@code hadoop.hdfs.EventType}
   */
  public enum EventType
      implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
    /**
     * EVENT_CREATE = 0;
     */
    EVENT_CREATE(0),
    /**
     * EVENT_CLOSE = 1;
     */
    EVENT_CLOSE(1),
    /**
     * EVENT_APPEND = 2;
     */
    EVENT_APPEND(2),
    /**
     * EVENT_RENAME = 3;
     */
    EVENT_RENAME(3),
    /**
     * EVENT_METADATA = 4;
     */
    EVENT_METADATA(4),
    /**
     * EVENT_UNLINK = 5;
     */
    EVENT_UNLINK(5),
    /**
     * EVENT_TRUNCATE = 6;
     */
    EVENT_TRUNCATE(6),
    ;

    /**
     * EVENT_CREATE = 0;
     */
    public static final int EVENT_CREATE_VALUE = 0;
    /**
     * EVENT_CLOSE = 1;
     */
    public static final int EVENT_CLOSE_VALUE = 1;
    /**
     * EVENT_APPEND = 2;
     */
    public static final int EVENT_APPEND_VALUE = 2;
    /**
     * EVENT_RENAME = 3;
     */
    public static final int EVENT_RENAME_VALUE = 3;
    /**
     * EVENT_METADATA = 4;
     */
    public static final int EVENT_METADATA_VALUE = 4;
    /**
     * EVENT_UNLINK = 5;
     */
    public static final int EVENT_UNLINK_VALUE = 5;
    /**
     * EVENT_TRUNCATE = 6;
     */
    public static final int EVENT_TRUNCATE_VALUE = 6;


    public final int getNumber() {
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static EventType valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static EventType forNumber(int value) {
      switch (value) {
        case 0: return EVENT_CREATE;
        case 1: return EVENT_CLOSE;
        case 2: return EVENT_APPEND;
        case 3: return EVENT_RENAME;
        case 4: return EVENT_METADATA;
        case 5: return EVENT_UNLINK;
        case 6: return EVENT_TRUNCATE;
        default: return null;
      }
    }

    public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<EventType>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
        EventType> internalValueMap =
          new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<EventType>() {
            public EventType findValueByNumber(int number) {
              return EventType.forNumber(number);
            }
          };

    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.getDescriptor().getEnumTypes().get(0);
    }

    private static final EventType[] VALUES = values();

    public static EventType valueOf(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private EventType(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:hadoop.hdfs.EventType)
  }
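
  // Illustrative usage sketch (not part of the protoc output): the enum
  // converts between wire numbers and constants in both directions, and
  // forNumber(...) returns null for numbers this stub does not know, so
  // callers can divert unrecognized values into unknown fields.
  //
  //   InotifyProtos.EventType t = InotifyProtos.EventType.forNumber(3);
  //   assert t == InotifyProtos.EventType.EVENT_RENAME;
  //   assert t.getNumber() == 3;
  //   assert InotifyProtos.EventType.forNumber(99) == null;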

  /**
   * Protobuf enum {@code hadoop.hdfs.INodeType}
   */
  public enum INodeType
      implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
    /**
     * I_TYPE_FILE = 0;
     */
    I_TYPE_FILE(0),
    /**
     * I_TYPE_DIRECTORY = 1;
     */
    I_TYPE_DIRECTORY(1),
    /**
     * I_TYPE_SYMLINK = 2;
     */
    I_TYPE_SYMLINK(2),
    ;

    /**
     * I_TYPE_FILE = 0;
     */
    public static final int I_TYPE_FILE_VALUE = 0;
    /**
     * I_TYPE_DIRECTORY = 1;
     */
    public static final int I_TYPE_DIRECTORY_VALUE = 1;
    /**
     * I_TYPE_SYMLINK = 2;
     */
    public static final int I_TYPE_SYMLINK_VALUE = 2;


    public final int getNumber() {
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static INodeType valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static INodeType forNumber(int value) {
      switch (value) {
        case 0: return I_TYPE_FILE;
        case 1: return I_TYPE_DIRECTORY;
        case 2: return I_TYPE_SYMLINK;
        default: return null;
      }
    }

    public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<INodeType>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
        INodeType> internalValueMap =
          new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<INodeType>() {
            public INodeType findValueByNumber(int number) {
              return INodeType.forNumber(number);
            }
          };

    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.getDescriptor().getEnumTypes().get(1);
    }

    private static final INodeType[] VALUES = values();

    public static INodeType valueOf(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private INodeType(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:hadoop.hdfs.INodeType)
  }

  /**
   * Protobuf enum {@code hadoop.hdfs.MetadataUpdateType}
   */
  public enum MetadataUpdateType
      implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
    /**
     * META_TYPE_TIMES = 0;
     */
    META_TYPE_TIMES(0),
    /**
     * META_TYPE_REPLICATION = 1;
     */
    META_TYPE_REPLICATION(1),
    /**
     * META_TYPE_OWNER = 2;
     */
    META_TYPE_OWNER(2),
    /**
     * META_TYPE_PERMS = 3;
     */
    META_TYPE_PERMS(3),
    /**
     * META_TYPE_ACLS = 4;
     */
    META_TYPE_ACLS(4),
    /**
     * META_TYPE_XATTRS = 5;
     */
    META_TYPE_XATTRS(5),
    ;

    /**
     * META_TYPE_TIMES = 0;
     */
    public static final int META_TYPE_TIMES_VALUE = 0;
    /**
     * META_TYPE_REPLICATION = 1;
     */
    public static final int META_TYPE_REPLICATION_VALUE = 1;
    /**
     * META_TYPE_OWNER = 2;
     */
    public static final int META_TYPE_OWNER_VALUE = 2;
    /**
     * META_TYPE_PERMS = 3;
     */
    public static final int META_TYPE_PERMS_VALUE = 3;
    /**
     * META_TYPE_ACLS = 4;
     */
    public static final int META_TYPE_ACLS_VALUE = 4;
    /**
     * META_TYPE_XATTRS = 5;
     */
    public static final int META_TYPE_XATTRS_VALUE = 5;


    public final int getNumber() {
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static MetadataUpdateType valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static MetadataUpdateType forNumber(int value) {
      switch (value) {
        case 0: return META_TYPE_TIMES;
        case 1: return META_TYPE_REPLICATION;
        case 2: return META_TYPE_OWNER;
        case 3: return META_TYPE_PERMS;
        case 4: return META_TYPE_ACLS;
        case 5: return META_TYPE_XATTRS;
        default: return null;
      }
    }

    public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<MetadataUpdateType>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
        MetadataUpdateType> internalValueMap =
          new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<MetadataUpdateType>() {
            public MetadataUpdateType findValueByNumber(int number) {
              return MetadataUpdateType.forNumber(number);
            }
          };

    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.getDescriptor().getEnumTypes().get(2);
    }

    private static final MetadataUpdateType[] VALUES = values();

    public static MetadataUpdateType valueOf(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private MetadataUpdateType(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:hadoop.hdfs.MetadataUpdateType)
  }

  public interface EventProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.EventProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * required .hadoop.hdfs.EventType type = 1;
     * @return Whether the type field is set.
     */
    boolean hasType();
    /**
     * required .hadoop.hdfs.EventType type = 1;
     * @return The type.
     */
    org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventType getType();

    /**
     * required bytes contents = 2;
     * @return Whether the contents field is set.
     */
    boolean hasContents();
    /**
     * required bytes contents = 2;
     * @return The contents.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getContents();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.EventProto}
   */
  public static final class EventProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.EventProto)
      EventProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use EventProto.newBuilder() to construct.
    private EventProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private EventProto() {
      type_ = 0;
      contents_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new EventProto();
    }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
    getUnknownFields() {
      return this.unknownFields;
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder.class);
    }

    private int bitField0_;
    public static final int TYPE_FIELD_NUMBER = 1;
    private int type_ = 0;
    /**
     * required .hadoop.hdfs.EventType type = 1;
     * @return Whether the type field is set.
     */
    @java.lang.Override public boolean hasType() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * required .hadoop.hdfs.EventType type = 1;
     * @return The type.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventType getType() {
      org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventType result = org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventType.forNumber(type_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventType.EVENT_CREATE : result;
    }

    public static final int CONTENTS_FIELD_NUMBER = 2;
    private org.apache.hadoop.thirdparty.protobuf.ByteString contents_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * required bytes contents = 2;
     * @return Whether the contents field is set.
     */
    @java.lang.Override
    public boolean hasContents() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * required bytes contents = 2;
     * @return The contents.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getContents() {
      return contents_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasType()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasContents()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, type_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeBytes(2, contents_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, type_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(2, contents_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto other = (org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto) obj;

      if (hasType() != other.hasType()) return false;
      if (hasType()) {
        if (type_ != other.type_) return false;
      }
      if (hasContents() != other.hasContents()) return false;
      if (hasContents()) {
        if (!getContents()
            .equals(other.getContents())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasType()) {
        hash = (37 * hash) + TYPE_FIELD_NUMBER;
        hash = (53 * hash) + type_;
      }
      if (hasContents()) {
        hash = (37 * hash) + CONTENTS_FIELD_NUMBER;
        hash = (53 * hash) + getContents().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.EventProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.EventProto)
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        type_ = 0;
        contents_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto build() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto result = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.type_ = type_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.contents_ = contents_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.getDefaultInstance()) return this;
        if (other.hasType()) {
          setType(other.getType());
        }
        if (other.hasContents()) {
          setContents(other.getContents());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasType()) {
          return false;
        }
        if (!hasContents()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
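              // Field tags encode (field_number << 3) | wire_type:
              // tag 8 is field 1 as a varint (the enum), tag 18 is
              // field 2 as a length-delimited value (the bytes payload).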
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventType tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventType.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  type_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 18: {
                contents_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int type_ = 0;
      /**
       * required .hadoop.hdfs.EventType type = 1;
       * @return Whether the type field is set.
       */
      @java.lang.Override public boolean hasType() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * required .hadoop.hdfs.EventType type = 1;
       * @return The type.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventType getType() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventType result = org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventType.forNumber(type_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventType.EVENT_CREATE : result;
      }
      /**
       * required .hadoop.hdfs.EventType type = 1;
       * @param value The type to set.
       * @return This builder for chaining.
       */
      public Builder setType(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventType value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        type_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * required .hadoop.hdfs.EventType type = 1;
       * @return This builder for chaining.
       */
      public Builder clearType() {
        bitField0_ = (bitField0_ & ~0x00000001);
        type_ = 0;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString contents_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * required bytes contents = 2;
       * @return Whether the contents field is set.
       */
      @java.lang.Override
      public boolean hasContents() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * required bytes contents = 2;
       * @return The contents.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getContents() {
        return contents_;
      }
      /**
       * required bytes contents = 2;
       * @param value The contents to set.
       * @return This builder for chaining.
       */
      public Builder setContents(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        contents_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * required bytes contents = 2;
       * @return This builder for chaining.
       */
      public Builder clearContents() {
        bitField0_ = (bitField0_ & ~0x00000002);
        contents_ = getDefaultInstance().getContents();
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.EventProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.EventProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<EventProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<EventProto>() {
      @java.lang.Override
      public EventProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<EventProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<EventProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
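
  // Round-trip sketch (illustrative, not generated): both fields are proto2
  // "required", so build() throws if either is unset and parseFrom(...)
  // rejects payloads that omit them.
  //
  //   InotifyProtos.EventProto ev = InotifyProtos.EventProto.newBuilder()
  //       .setType(InotifyProtos.EventType.EVENT_CREATE)
  //       .setContents(org.apache.hadoop.thirdparty.protobuf.ByteString
  //           .copyFromUtf8("serialized event body"))
  //       .build();
  //   InotifyProtos.EventProto back =
  //       InotifyProtos.EventProto.parseFrom(ev.toByteArray());
  //   assert back.equals(ev);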

  public interface EventBatchProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.EventBatchProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * required int64 txid = 1;
     * @return Whether the txid field is set.
     */
    boolean hasTxid();
    /**
     * required int64 txid = 1;
     * @return The txid.
     */
    long getTxid();

    /**
     * repeated .hadoop.hdfs.EventProto events = 2;
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto>
        getEventsList();
    /**
     * repeated .hadoop.hdfs.EventProto events = 2;
     */
    org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto getEvents(int index);
    /**
     * repeated .hadoop.hdfs.EventProto events = 2;
     */
    int getEventsCount();
    /**
     * repeated .hadoop.hdfs.EventProto events = 2;
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder>
        getEventsOrBuilderList();
    /**
     * repeated .hadoop.hdfs.EventProto events = 2;
     */
    org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder getEventsOrBuilder(
        int index);
  }
  /**
   * Protobuf type {@code hadoop.hdfs.EventBatchProto}
   */
  public static final class EventBatchProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.EventBatchProto)
      EventBatchProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use EventBatchProto.newBuilder() to construct.
    private EventBatchProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private EventBatchProto() {
      events_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new EventBatchProto();
    }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
    getUnknownFields() {
      return this.unknownFields;
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventBatchProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventBatchProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.Builder.class);
    }

    private int bitField0_;
    public static final int TXID_FIELD_NUMBER = 1;
    private long txid_ = 0L;
    /**
     * required int64 txid = 1;
     * @return Whether the txid field is set.
     */
    @java.lang.Override
    public boolean hasTxid() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * required int64 txid = 1;
     * @return The txid.
     */
    @java.lang.Override
    public long getTxid() {
      return txid_;
    }

    public static final int EVENTS_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto> events_;
    /**
     * repeated .hadoop.hdfs.EventProto events = 2;
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto> getEventsList() {
      return events_;
    }
    /**
     * repeated .hadoop.hdfs.EventProto events = 2;
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder>
        getEventsOrBuilderList() {
      return events_;
    }
    /**
     * repeated .hadoop.hdfs.EventProto events = 2;
     */
    @java.lang.Override
    public int getEventsCount() {
      return events_.size();
    }
    /**
     * repeated .hadoop.hdfs.EventProto events = 2;
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto getEvents(int index) {
      return events_.get(index);
    }
    /**
     * repeated .hadoop.hdfs.EventProto events = 2;
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder getEventsOrBuilder(
        int index) {
      return events_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasTxid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getEventsCount(); i++) {
        if (!getEvents(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeInt64(1, txid_);
      }
      for (int i = 0; i < events_.size(); i++) {
        output.writeMessage(2, events_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(1, txid_);
      }
      for (int i = 0; i < events_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, events_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto other = (org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto) obj;

      if (hasTxid() != other.hasTxid()) return false;
      if (hasTxid()) {
        if (getTxid()
            != other.getTxid()) return false;
      }
      if (!getEventsList()
          .equals(other.getEventsList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasTxid()) {
        hash = (37 * hash) + TXID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getTxid());
      }
      if (getEventsCount() > 0) {
        hash = (37 * hash) + EVENTS_FIELD_NUMBER;
        hash = (53 * hash) + getEventsList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.EventBatchProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.EventBatchProto)
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventBatchProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventBatchProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        txid_ = 0L;
        if (eventsBuilder_ == null) {
          events_ = java.util.Collections.emptyList();
        } else {
          events_ = null;
          eventsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventBatchProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto build() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto result = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto result) {
        if (eventsBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0)) {
            events_ = java.util.Collections.unmodifiableList(events_);
            bitField0_ = (bitField0_ & ~0x00000002);
          }
          result.events_ = events_;
        } else {
          result.events_ = eventsBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.txid_ = txid_;
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.getDefaultInstance()) return this;
        if (other.hasTxid()) {
          setTxid(other.getTxid());
        }
        if (eventsBuilder_ == null) {
          if (!other.events_.isEmpty()) {
            if (events_.isEmpty()) {
              events_ = other.events_;
              bitField0_ = (bitField0_ & ~0x00000002);
            } else {
              ensureEventsIsMutable();
              events_.addAll(other.events_);
            }
            onChanged();
          }
        } else {
          if (!other.events_.isEmpty()) {
            if (eventsBuilder_.isEmpty()) {
              eventsBuilder_.dispose();
              eventsBuilder_ = null;
              events_ = other.events_;
              bitField0_ = (bitField0_ & ~0x00000002);
              eventsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getEventsFieldBuilder() : null;
            } else {
              eventsBuilder_.addAllMessages(other.events_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasTxid()) {
          return false;
        }
        for (int i = 0; i < getEventsCount(); i++) {
          if (!getEvents(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }
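
      // Usage sketch (illustrative; addEvents is protoc's standard adder for
      // a repeated message field, though it falls outside this excerpt):
      //
      //   InotifyProtos.EventBatchProto batch =
      //       InotifyProtos.EventBatchProto.newBuilder()
      //           .setTxid(42L)
      //           .addEvents(ev) // ev: a fully initialized EventProto
      //           .build();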

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                txid_ = input.readInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 18: {
                org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.PARSER,
                        extensionRegistry);
                if (eventsBuilder_ == null) {
                  ensureEventsIsMutable();
                  events_.add(m);
                } else {
                  eventsBuilder_.addMessage(m);
                }
                break;
              } // case 18
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private long txid_ ;
      /**
       * required int64 txid = 1;
       * @return Whether the txid field is set.
       */
      @java.lang.Override
      public boolean hasTxid() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * required int64 txid = 1;
       * @return The txid.
       */
      @java.lang.Override
      public long getTxid() {
        return txid_;
      }
      /**
       * required int64 txid = 1;
       * @param value The txid to set.
       * @return This builder for chaining.
       */
      public Builder setTxid(long value) {
        txid_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * required int64 txid = 1;
       * @return This builder for chaining.
       */
      public Builder clearTxid() {
        bitField0_ = (bitField0_ & ~0x00000001);
        txid_ = 0L;
        onChanged();
        return this;
      }
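      /*
       * Editorial sketch, not generated code: the txid accessors above follow
       * the standard proto2 builder pattern; the value 42L is illustrative.
       *
       *   InotifyProtos.EventBatchProto.Builder b =
       *       InotifyProtos.EventBatchProto.newBuilder();
       *   b.setTxid(42L);   // sets bit 0x00000001, so hasTxid() is true
       *   b.clearTxid();    // clears the bit and resets the field to 0L
       */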

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto> events_ =
        java.util.Collections.emptyList();
      private void ensureEventsIsMutable() {
        if (!((bitField0_ & 0x00000002) != 0)) {
          events_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto>(events_);
          bitField0_ |= 0x00000002;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder> eventsBuilder_;

      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto> getEventsList() {
        if (eventsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(events_);
        } else {
          return eventsBuilder_.getMessageList();
        }
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public int getEventsCount() {
        if (eventsBuilder_ == null) {
          return events_.size();
        } else {
          return eventsBuilder_.getCount();
        }
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto getEvents(int index) {
        if (eventsBuilder_ == null) {
          return events_.get(index);
        } else {
          return eventsBuilder_.getMessage(index);
        }
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public Builder setEvents(
          int index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto value) {
        if (eventsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureEventsIsMutable();
          events_.set(index, value);
          onChanged();
        } else {
          eventsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public Builder setEvents(
          int index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder builderForValue) {
        if (eventsBuilder_ == null) {
          ensureEventsIsMutable();
          events_.set(index, builderForValue.build());
          onChanged();
        } else {
          eventsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public Builder addEvents(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto value) {
        if (eventsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureEventsIsMutable();
          events_.add(value);
          onChanged();
        } else {
          eventsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public Builder addEvents(
          int index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto value) {
        if (eventsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureEventsIsMutable();
          events_.add(index, value);
          onChanged();
        } else {
          eventsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public Builder addEvents(
          org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder builderForValue) {
        if (eventsBuilder_ == null) {
          ensureEventsIsMutable();
          events_.add(builderForValue.build());
          onChanged();
        } else {
          eventsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public Builder addEvents(
          int index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder builderForValue) {
        if (eventsBuilder_ == null) {
          ensureEventsIsMutable();
          events_.add(index, builderForValue.build());
          onChanged();
        } else {
          eventsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public Builder addAllEvents(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto> values) {
        if (eventsBuilder_ == null) {
          ensureEventsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, events_);
          onChanged();
        } else {
          eventsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public Builder clearEvents() {
        if (eventsBuilder_ == null) {
          events_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
        } else {
          eventsBuilder_.clear();
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public Builder removeEvents(int index) {
        if (eventsBuilder_ == null) {
          ensureEventsIsMutable();
          events_.remove(index);
          onChanged();
        } else {
          eventsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder getEventsBuilder(
          int index) {
        return getEventsFieldBuilder().getBuilder(index);
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder getEventsOrBuilder(
          int index) {
        if (eventsBuilder_ == null) {
          return events_.get(index);
        } else {
          return eventsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder>
           getEventsOrBuilderList() {
        if (eventsBuilder_ != null) {
          return eventsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(events_);
        }
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder addEventsBuilder() {
        return getEventsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.getDefaultInstance());
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder addEventsBuilder(
          int index) {
        return getEventsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.getDefaultInstance());
      }
      /**
       * repeated .hadoop.hdfs.EventProto events = 2;
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder>
           getEventsBuilderList() {
        return getEventsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder> 
          getEventsFieldBuilder() {
        if (eventsBuilder_ == null) {
          eventsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder>(
                  events_,
                  ((bitField0_ & 0x00000002) != 0),
                  getParentForChildren(),
                  isClean());
          events_ = null;
        }
        return eventsBuilder_;
      }
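      /*
       * Editorial note, not generated code: the repeated "events" field keeps
       * two mutually exclusive representations. Until getEventsFieldBuilder()
       * runs, elements live in the plain events_ list guarded by bit
       * 0x00000002; afterwards the RepeatedFieldBuilderV3 owns the data and
       * events_ is nulled, which is why every accessor above first branches on
       * eventsBuilder_ == null.
       */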
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.EventBatchProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.EventBatchProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<EventBatchProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<EventBatchProto>() {
      @java.lang.Override
      public EventBatchProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<EventBatchProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<EventBatchProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
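  /*
   * Editorial sketch, not generated code: a minimal round trip through
   * EventBatchProto using only members defined in this class; the txid value
   * is illustrative. build() would throw if the required txid were unset.
   *
   *   InotifyProtos.EventBatchProto batch =
   *       InotifyProtos.EventBatchProto.newBuilder()
   *           .setTxid(7L)
   *           .build();
   *   byte[] wire = batch.toByteArray();
   *   InotifyProtos.EventBatchProto copy =
   *       InotifyProtos.EventBatchProto.parseFrom(wire);
   */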

  public interface CreateEventProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.CreateEventProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * required .hadoop.hdfs.INodeType type = 1;
     * @return Whether the type field is set.
     */
    boolean hasType();
    /**
     * required .hadoop.hdfs.INodeType type = 1;
     * @return The type.
     */
    org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.INodeType getType();

    /**
     * required string path = 2;
     * @return Whether the path field is set.
     */
    boolean hasPath();
    /**
     * required string path = 2;
     * @return The path.
     */
    java.lang.String getPath();
    /**
     * required string path = 2;
     * @return The bytes for path.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes();

    /**
     * required int64 ctime = 3;
     * @return Whether the ctime field is set.
     */
    boolean hasCtime();
    /**
     * required int64 ctime = 3;
     * @return The ctime.
     */
    long getCtime();

    /**
     * required string ownerName = 4;
     * @return Whether the ownerName field is set.
     */
    boolean hasOwnerName();
    /**
     * required string ownerName = 4;
     * @return The ownerName.
     */
    java.lang.String getOwnerName();
    /**
     * required string ownerName = 4;
     * @return The bytes for ownerName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getOwnerNameBytes();

    /**
     * required string groupName = 5;
     * @return Whether the groupName field is set.
     */
    boolean hasGroupName();
    /**
     * required string groupName = 5;
     * @return The groupName.
     */
    java.lang.String getGroupName();
    /**
     * required string groupName = 5;
     * @return The bytes for groupName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getGroupNameBytes();

    /**
     * required .hadoop.hdfs.FsPermissionProto perms = 6;
     * @return Whether the perms field is set.
     */
    boolean hasPerms();
    /**
     * required .hadoop.hdfs.FsPermissionProto perms = 6;
     * @return The perms.
     */
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPerms();
    /**
     * required .hadoop.hdfs.FsPermissionProto perms = 6;
     */
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermsOrBuilder();

    /**
     * optional int32 replication = 7;
     * @return Whether the replication field is set.
     */
    boolean hasReplication();
    /**
     * optional int32 replication = 7;
     * @return The replication.
     */
    int getReplication();

    /**
     * optional string symlinkTarget = 8;
     * @return Whether the symlinkTarget field is set.
     */
    boolean hasSymlinkTarget();
    /**
     * optional string symlinkTarget = 8;
     * @return The symlinkTarget.
     */
    java.lang.String getSymlinkTarget();
    /**
     * optional string symlinkTarget = 8;
     * @return The bytes for symlinkTarget.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getSymlinkTargetBytes();

    /**
     * optional bool overwrite = 9;
     * @return Whether the overwrite field is set.
     */
    boolean hasOverwrite();
    /**
     * optional bool overwrite = 9;
     * @return The overwrite.
     */
    boolean getOverwrite();

    /**
     * optional int64 defaultBlockSize = 10 [default = 0];
     * @return Whether the defaultBlockSize field is set.
     */
    boolean hasDefaultBlockSize();
    /**
     * optional int64 defaultBlockSize = 10 [default = 0];
     * @return The defaultBlockSize.
     */
    long getDefaultBlockSize();

    /**
     * optional bool erasureCoded = 11;
     * @return Whether the erasureCoded field is set.
     */
    boolean hasErasureCoded();
    /**
     * optional bool erasureCoded = 11;
     * @return The erasureCoded.
     */
    boolean getErasureCoded();
  }
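  /*
   * Editorial note, not generated code: as with all proto2 messages, the
   * getters declared above return the field's default value when it is unset
   * (getReplication() yields 0, getSymlinkTarget() yields ""), so callers
   * should gate reads of optional fields on the matching has*() method, e.g.
   * if (e.hasReplication()) { int r = e.getReplication(); }.
   */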
  /**
   * Protobuf type {@code hadoop.hdfs.CreateEventProto}
   */
  public static final class CreateEventProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.CreateEventProto)
      CreateEventProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use CreateEventProto.newBuilder() to construct.
    private CreateEventProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CreateEventProto() {
      type_ = 0;
      path_ = "";
      ownerName_ = "";
      groupName_ = "";
      symlinkTarget_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CreateEventProto();
    }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
    getUnknownFields() {
      return this.unknownFields;
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_CreateEventProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_CreateEventProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto.Builder.class);
    }

    private int bitField0_;
    public static final int TYPE_FIELD_NUMBER = 1;
    private int type_ = 0;
    /**
     * required .hadoop.hdfs.INodeType type = 1;
     * @return Whether the type field is set.
     */
    @java.lang.Override public boolean hasType() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * required .hadoop.hdfs.INodeType type = 1;
     * @return The type.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.INodeType getType() {
      org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.INodeType result = org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.INodeType.forNumber(type_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.INodeType.I_TYPE_FILE : result;
    }

    public static final int PATH_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object path_ = "";
    /**
     * required string path = 2;
     * @return Whether the path field is set.
     */
    @java.lang.Override
    public boolean hasPath() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * required string path = 2;
     * @return The path.
     */
    @java.lang.Override
    public java.lang.String getPath() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          path_ = s;
        }
        return s;
      }
    }
    /**
     * required string path = 2;
     * @return The bytes for path.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        path_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }
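    /*
     * Editorial note, not generated code: path_ is typed as Object because it
     * lazily caches either representation of the string. A value parsed off
     * the wire starts life as a ByteString; getPath() decodes it and, when the
     * bytes are valid UTF-8, stores the String back so later calls skip the
     * conversion, while getPathBytes() performs the inverse caching. The
     * ownerName_, groupName_ and symlinkTarget_ fields below use the same
     * pattern.
     */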

    public static final int CTIME_FIELD_NUMBER = 3;
    private long ctime_ = 0L;
    /**
     * required int64 ctime = 3;
     * @return Whether the ctime field is set.
     */
    @java.lang.Override
    public boolean hasCtime() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * required int64 ctime = 3;
     * @return The ctime.
     */
    @java.lang.Override
    public long getCtime() {
      return ctime_;
    }

    public static final int OWNERNAME_FIELD_NUMBER = 4;
    @SuppressWarnings("serial")
    private volatile java.lang.Object ownerName_ = "";
    /**
     * required string ownerName = 4;
     * @return Whether the ownerName field is set.
     */
    @java.lang.Override
    public boolean hasOwnerName() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * required string ownerName = 4;
     * @return The ownerName.
     */
    @java.lang.Override
    public java.lang.String getOwnerName() {
      java.lang.Object ref = ownerName_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          ownerName_ = s;
        }
        return s;
      }
    }
    /**
     * required string ownerName = 4;
     * @return The bytes for ownerName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getOwnerNameBytes() {
      java.lang.Object ref = ownerName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        ownerName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int GROUPNAME_FIELD_NUMBER = 5;
    @SuppressWarnings("serial")
    private volatile java.lang.Object groupName_ = "";
    /**
     * required string groupName = 5;
     * @return Whether the groupName field is set.
     */
    @java.lang.Override
    public boolean hasGroupName() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * required string groupName = 5;
     * @return The groupName.
     */
    @java.lang.Override
    public java.lang.String getGroupName() {
      java.lang.Object ref = groupName_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          groupName_ = s;
        }
        return s;
      }
    }
    /**
     * required string groupName = 5;
     * @return The bytes for groupName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getGroupNameBytes() {
      java.lang.Object ref = groupName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        groupName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int PERMS_FIELD_NUMBER = 6;
    private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto perms_;
    /**
     * required .hadoop.hdfs.FsPermissionProto perms = 6;
     * @return Whether the perms field is set.
     */
    @java.lang.Override
    public boolean hasPerms() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * required .hadoop.hdfs.FsPermissionProto perms = 6;
     * @return The perms.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPerms() {
      return perms_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : perms_;
    }
    /**
     * required .hadoop.hdfs.FsPermissionProto perms = 6;
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermsOrBuilder() {
      return perms_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : perms_;
    }

    public static final int REPLICATION_FIELD_NUMBER = 7;
    private int replication_ = 0;
    /**
     * optional int32 replication = 7;
     * @return Whether the replication field is set.
     */
    @java.lang.Override
    public boolean hasReplication() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * optional int32 replication = 7;
     * @return The replication.
     */
    @java.lang.Override
    public int getReplication() {
      return replication_;
    }

    public static final int SYMLINKTARGET_FIELD_NUMBER = 8;
    @SuppressWarnings("serial")
    private volatile java.lang.Object symlinkTarget_ = "";
    /**
     * optional string symlinkTarget = 8;
     * @return Whether the symlinkTarget field is set.
     */
    @java.lang.Override
    public boolean hasSymlinkTarget() {
      return ((bitField0_ & 0x00000080) != 0);
    }
    /**
     * optional string symlinkTarget = 8;
     * @return The symlinkTarget.
     */
    @java.lang.Override
    public java.lang.String getSymlinkTarget() {
      java.lang.Object ref = symlinkTarget_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          symlinkTarget_ = s;
        }
        return s;
      }
    }
    /**
     * optional string symlinkTarget = 8;
     * @return The bytes for symlinkTarget.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getSymlinkTargetBytes() {
      java.lang.Object ref = symlinkTarget_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        symlinkTarget_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int OVERWRITE_FIELD_NUMBER = 9;
    private boolean overwrite_ = false;
    /**
     * optional bool overwrite = 9;
     * @return Whether the overwrite field is set.
     */
    @java.lang.Override
    public boolean hasOverwrite() {
      return ((bitField0_ & 0x00000100) != 0);
    }
    /**
     * optional bool overwrite = 9;
     * @return The overwrite.
     */
    @java.lang.Override
    public boolean getOverwrite() {
      return overwrite_;
    }

    public static final int DEFAULTBLOCKSIZE_FIELD_NUMBER = 10;
    private long defaultBlockSize_ = 0L;
    /**
     * optional int64 defaultBlockSize = 10 [default = 0];
     * @return Whether the defaultBlockSize field is set.
     */
    @java.lang.Override
    public boolean hasDefaultBlockSize() {
      return ((bitField0_ & 0x00000200) != 0);
    }
    /**
     * optional int64 defaultBlockSize = 10 [default = 0];
     * @return The defaultBlockSize.
     */
    @java.lang.Override
    public long getDefaultBlockSize() {
      return defaultBlockSize_;
    }

    public static final int ERASURECODED_FIELD_NUMBER = 11;
    private boolean erasureCoded_ = false;
    /**
     * optional bool erasureCoded = 11;
     * @return Whether the erasureCoded field is set.
     */
    @java.lang.Override
    public boolean hasErasureCoded() {
      return ((bitField0_ & 0x00000400) != 0);
    }
    /**
     * optional bool erasureCoded = 11;
     * @return The erasureCoded.
     */
    @java.lang.Override
    public boolean getErasureCoded() {
      return erasureCoded_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasType()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasPath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasCtime()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasOwnerName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasGroupName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasPerms()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getPerms().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
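    /*
     * Editorial sketch, not generated code: the checks above mirror the six
     * required fields of CreateEventProto plus the initialization of the
     * nested perms message, so Builder.build() rejects incomplete messages.
     * All field values below are invented for illustration:
     *
     *   InotifyProtos.CreateEventProto.Builder b =
     *       InotifyProtos.CreateEventProto.newBuilder()
     *           .setType(InotifyProtos.INodeType.I_TYPE_FILE)
     *           .setPath("/tmp/example")
     *           .setCtime(0L)
     *           .setOwnerName("hdfs")
     *           .setGroupName("hadoop");
     *   // b.build() would still throw: perms is required but unset.
     *   InotifyProtos.CreateEventProto partial = b.buildPartial();
     */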

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, type_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, path_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeInt64(3, ctime_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 4, ownerName_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, groupName_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeMessage(6, getPerms());
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        output.writeInt32(7, replication_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 8, symlinkTarget_);
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        output.writeBool(9, overwrite_);
      }
      if (((bitField0_ & 0x00000200) != 0)) {
        output.writeInt64(10, defaultBlockSize_);
      }
      if (((bitField0_ & 0x00000400) != 0)) {
        output.writeBool(11, erasureCoded_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, type_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, path_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(3, ctime_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(4, ownerName_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, groupName_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(6, getPerms());
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt32Size(7, replication_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(8, symlinkTarget_);
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(9, overwrite_);
      }
      if (((bitField0_ & 0x00000200) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(10, defaultBlockSize_);
      }
      if (((bitField0_ & 0x00000400) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(11, erasureCoded_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }
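    /*
     * Editorial note, not generated code: getSerializedSize() memoizes its
     * result in the inherited memoizedSize field (initialized to -1), so the
     * per-field computation above runs at most once per immutable instance;
     * writeTo() consults the same bitField0_ bits to emit only fields that
     * were actually set.
     */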

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto other = (org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto) obj;

      if (hasType() != other.hasType()) return false;
      if (hasType()) {
        if (type_ != other.type_) return false;
      }
      if (hasPath() != other.hasPath()) return false;
      if (hasPath()) {
        if (!getPath()
            .equals(other.getPath())) return false;
      }
      if (hasCtime() != other.hasCtime()) return false;
      if (hasCtime()) {
        if (getCtime()
            != other.getCtime()) return false;
      }
      if (hasOwnerName() != other.hasOwnerName()) return false;
      if (hasOwnerName()) {
        if (!getOwnerName()
            .equals(other.getOwnerName())) return false;
      }
      if (hasGroupName() != other.hasGroupName()) return false;
      if (hasGroupName()) {
        if (!getGroupName()
            .equals(other.getGroupName())) return false;
      }
      if (hasPerms() != other.hasPerms()) return false;
      if (hasPerms()) {
        if (!getPerms()
            .equals(other.getPerms())) return false;
      }
      if (hasReplication() != other.hasReplication()) return false;
      if (hasReplication()) {
        if (getReplication()
            != other.getReplication()) return false;
      }
      if (hasSymlinkTarget() != other.hasSymlinkTarget()) return false;
      if (hasSymlinkTarget()) {
        if (!getSymlinkTarget()
            .equals(other.getSymlinkTarget())) return false;
      }
      if (hasOverwrite() != other.hasOverwrite()) return false;
      if (hasOverwrite()) {
        if (getOverwrite()
            != other.getOverwrite()) return false;
      }
      if (hasDefaultBlockSize() != other.hasDefaultBlockSize()) return false;
      if (hasDefaultBlockSize()) {
        if (getDefaultBlockSize()
            != other.getDefaultBlockSize()) return false;
      }
      if (hasErasureCoded() != other.hasErasureCoded()) return false;
      if (hasErasureCoded()) {
        if (getErasureCoded()
            != other.getErasureCoded()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasType()) {
        hash = (37 * hash) + TYPE_FIELD_NUMBER;
        hash = (53 * hash) + type_;
      }
      if (hasPath()) {
        hash = (37 * hash) + PATH_FIELD_NUMBER;
        hash = (53 * hash) + getPath().hashCode();
      }
      if (hasCtime()) {
        hash = (37 * hash) + CTIME_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getCtime());
      }
      if (hasOwnerName()) {
        hash = (37 * hash) + OWNERNAME_FIELD_NUMBER;
        hash = (53 * hash) + getOwnerName().hashCode();
      }
      if (hasGroupName()) {
        hash = (37 * hash) + GROUPNAME_FIELD_NUMBER;
        hash = (53 * hash) + getGroupName().hashCode();
      }
      if (hasPerms()) {
        hash = (37 * hash) + PERMS_FIELD_NUMBER;
        hash = (53 * hash) + getPerms().hashCode();
      }
      if (hasReplication()) {
        hash = (37 * hash) + REPLICATION_FIELD_NUMBER;
        hash = (53 * hash) + getReplication();
      }
      if (hasSymlinkTarget()) {
        hash = (37 * hash) + SYMLINKTARGET_FIELD_NUMBER;
        hash = (53 * hash) + getSymlinkTarget().hashCode();
      }
      if (hasOverwrite()) {
        hash = (37 * hash) + OVERWRITE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getOverwrite());
      }
      if (hasDefaultBlockSize()) {
        hash = (37 * hash) + DEFAULTBLOCKSIZE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getDefaultBlockSize());
      }
      if (hasErasureCoded()) {
        hash = (37 * hash) + ERASURECODED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getErasureCoded());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
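    /*
     * Editorial sketch, not generated code: the overloads above are the usual
     * generated parse entry points. Assuming data holds a serialized
     * CreateEventProto:
     *
     *   try {
     *     InotifyProtos.CreateEventProto ev =
     *         InotifyProtos.CreateEventProto.parseFrom(data);
     *   } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
     *     // malformed bytes, or a required field was missing
     *   }
     */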

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.CreateEventProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.CreateEventProto)
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_CreateEventProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_CreateEventProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getPermsFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        type_ = 0;
        path_ = "";
        ctime_ = 0L;
        ownerName_ = "";
        groupName_ = "";
        perms_ = null;
        if (permsBuilder_ != null) {
          permsBuilder_.dispose();
          permsBuilder_ = null;
        }
        replication_ = 0;
        symlinkTarget_ = "";
        overwrite_ = false;
        defaultBlockSize_ = 0L;
        erasureCoded_ = false;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_CreateEventProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto build() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto result = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.type_ = type_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.path_ = path_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.ctime_ = ctime_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.ownerName_ = ownerName_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.groupName_ = groupName_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.perms_ = permsBuilder_ == null
              ? perms_
              : permsBuilder_.build();
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.replication_ = replication_;
          to_bitField0_ |= 0x00000040;
        }
        if (((from_bitField0_ & 0x00000080) != 0)) {
          result.symlinkTarget_ = symlinkTarget_;
          to_bitField0_ |= 0x00000080;
        }
        if (((from_bitField0_ & 0x00000100) != 0)) {
          result.overwrite_ = overwrite_;
          to_bitField0_ |= 0x00000100;
        }
        if (((from_bitField0_ & 0x00000200) != 0)) {
          result.defaultBlockSize_ = defaultBlockSize_;
          to_bitField0_ |= 0x00000200;
        }
        if (((from_bitField0_ & 0x00000400) != 0)) {
          result.erasureCoded_ = erasureCoded_;
          to_bitField0_ |= 0x00000400;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto.getDefaultInstance()) return this;
        if (other.hasType()) {
          setType(other.getType());
        }
        if (other.hasPath()) {
          path_ = other.path_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (other.hasCtime()) {
          setCtime(other.getCtime());
        }
        if (other.hasOwnerName()) {
          ownerName_ = other.ownerName_;
          bitField0_ |= 0x00000008;
          onChanged();
        }
        if (other.hasGroupName()) {
          groupName_ = other.groupName_;
          bitField0_ |= 0x00000010;
          onChanged();
        }
        if (other.hasPerms()) {
          mergePerms(other.getPerms());
        }
        if (other.hasReplication()) {
          setReplication(other.getReplication());
        }
        if (other.hasSymlinkTarget()) {
          symlinkTarget_ = other.symlinkTarget_;
          bitField0_ |= 0x00000080;
          onChanged();
        }
        if (other.hasOverwrite()) {
          setOverwrite(other.getOverwrite());
        }
        if (other.hasDefaultBlockSize()) {
          setDefaultBlockSize(other.getDefaultBlockSize());
        }
        if (other.hasErasureCoded()) {
          setErasureCoded(other.getErasureCoded());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasType()) {
          return false;
        }
        if (!hasPath()) {
          return false;
        }
        if (!hasCtime()) {
          return false;
        }
        if (!hasOwnerName()) {
          return false;
        }
        if (!hasGroupName()) {
          return false;
        }
        if (!hasPerms()) {
          return false;
        }
        if (!getPerms().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.INodeType tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.INodeType.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  type_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 18: {
                path_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 24: {
                ctime_ = input.readInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 34: {
                ownerName_ = input.readBytes();
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 42: {
                groupName_ = input.readBytes();
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              case 50: {
                input.readMessage(
                    getPermsFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000020;
                break;
              } // case 50
              case 56: {
                replication_ = input.readInt32();
                bitField0_ |= 0x00000040;
                break;
              } // case 56
              case 66: {
                symlinkTarget_ = input.readBytes();
                bitField0_ |= 0x00000080;
                break;
              } // case 66
              case 72: {
                overwrite_ = input.readBool();
                bitField0_ |= 0x00000100;
                break;
              } // case 72
              case 80: {
                defaultBlockSize_ = input.readInt64();
                bitField0_ |= 0x00000200;
                break;
              } // case 80
              case 88: {
                erasureCoded_ = input.readBool();
                bitField0_ |= 0x00000400;
                break;
              } // case 88
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
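      /*
       * Editorial note, not generated code: "case 8" above is the only branch
       * that can reject its payload. An enum number with no matching INodeType
       * constant is routed to mergeUnknownVarintField(1, tmpRaw) instead of
       * being dropped, so re-serializing a message produced by a newer schema
       * preserves the unrecognized value.
       */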
      private int bitField0_;

      private int type_ = 0;
      /**
       * required .hadoop.hdfs.INodeType type = 1;
       * @return Whether the type field is set.
       */
      @java.lang.Override public boolean hasType() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * required .hadoop.hdfs.INodeType type = 1;
       * @return The type.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.INodeType getType() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.INodeType result = org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.INodeType.forNumber(type_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.INodeType.I_TYPE_FILE : result;
      }
      /**
       * required .hadoop.hdfs.INodeType type = 1;
       * @param value The type to set.
       * @return This builder for chaining.
       */
      public Builder setType(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.INodeType value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        type_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * required .hadoop.hdfs.INodeType type = 1;
       * @return This builder for chaining.
       */
      public Builder clearType() {
        bitField0_ = (bitField0_ & ~0x00000001);
        type_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object path_ = "";
      /**
       * required string path = 2;
       * @return Whether the path field is set.
       */
      public boolean hasPath() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * required string path = 2;
       * @return The path.
       */
      public java.lang.String getPath() {
        java.lang.Object ref = path_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            path_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string path = 2;
       * @return The bytes for path.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getPathBytes() {
        java.lang.Object ref = path_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          path_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * required string path = 2;
       * @param value The path to set.
       * @return This builder for chaining.
       */
      public Builder setPath(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * required string path = 2;
       * @return This builder for chaining.
       */
      public Builder clearPath() {
        path_ = getDefaultInstance().getPath();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * required string path = 2;
       * @param value The bytes for path to set.
       * @return This builder for chaining.
       */
      public Builder setPathBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private long ctime_ ;
      /**
       * required int64 ctime = 3;
       * @return Whether the ctime field is set.
       */
      @java.lang.Override
      public boolean hasCtime() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * required int64 ctime = 3;
       * @return The ctime.
       */
      @java.lang.Override
      public long getCtime() {
        return ctime_;
      }
      /**
       * required int64 ctime = 3;
       * @param value The ctime to set.
       * @return This builder for chaining.
       */
      public Builder setCtime(long value) {
        
        ctime_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * required int64 ctime = 3;
       * @return This builder for chaining.
       */
      public Builder clearCtime() {
        bitField0_ = (bitField0_ & ~0x00000004);
        ctime_ = 0L;
        onChanged();
        return this;
      }

      private java.lang.Object ownerName_ = "";
      /**
       * required string ownerName = 4;
       * @return Whether the ownerName field is set.
       */
      public boolean hasOwnerName() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * required string ownerName = 4;
       * @return The ownerName.
       */
      public java.lang.String getOwnerName() {
        java.lang.Object ref = ownerName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            ownerName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string ownerName = 4;
       * @return The bytes for ownerName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getOwnerNameBytes() {
        java.lang.Object ref = ownerName_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          ownerName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * required string ownerName = 4;
       * @param value The ownerName to set.
       * @return This builder for chaining.
       */
      public Builder setOwnerName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ownerName_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * required string ownerName = 4;
       * @return This builder for chaining.
       */
      public Builder clearOwnerName() {
        ownerName_ = getDefaultInstance().getOwnerName();
        bitField0_ = (bitField0_ & ~0x00000008);
        onChanged();
        return this;
      }
      /**
       * required string ownerName = 4;
       * @param value The bytes for ownerName to set.
       * @return This builder for chaining.
       */
      public Builder setOwnerNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ownerName_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }

      private java.lang.Object groupName_ = "";
      /**
       * required string groupName = 5;
       * @return Whether the groupName field is set.
       */
      public boolean hasGroupName() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * required string groupName = 5;
       * @return The groupName.
       */
      public java.lang.String getGroupName() {
        java.lang.Object ref = groupName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            groupName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string groupName = 5;
       * @return The bytes for groupName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getGroupNameBytes() {
        java.lang.Object ref = groupName_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          groupName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * required string groupName = 5;
       * @param value The groupName to set.
       * @return This builder for chaining.
       */
      public Builder setGroupName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        groupName_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * required string groupName = 5;
       * @return This builder for chaining.
       */
      public Builder clearGroupName() {
        groupName_ = getDefaultInstance().getGroupName();
        bitField0_ = (bitField0_ & ~0x00000010);
        onChanged();
        return this;
      }
      /**
       * required string groupName = 5;
       * @param value The bytes for groupName to set.
       * @return This builder for chaining.
       */
      public Builder setGroupNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        groupName_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto perms_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> permsBuilder_;
      /**
       * required .hadoop.hdfs.FsPermissionProto perms = 6;
       * @return Whether the perms field is set.
       */
      public boolean hasPerms() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * required .hadoop.hdfs.FsPermissionProto perms = 6;
       * @return The perms.
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPerms() {
        if (permsBuilder_ == null) {
          return perms_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : perms_;
        } else {
          return permsBuilder_.getMessage();
        }
      }
      /**
       * required .hadoop.hdfs.FsPermissionProto perms = 6;
       */
      public Builder setPerms(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) {
        if (permsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          perms_ = value;
        } else {
          permsBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * required .hadoop.hdfs.FsPermissionProto perms = 6;
       */
      public Builder setPerms(
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) {
        if (permsBuilder_ == null) {
          perms_ = builderForValue.build();
        } else {
          permsBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * required .hadoop.hdfs.FsPermissionProto perms = 6;
       */
      public Builder mergePerms(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) {
        if (permsBuilder_ == null) {
          if (((bitField0_ & 0x00000020) != 0) &&
            perms_ != null &&
            perms_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) {
            getPermsBuilder().mergeFrom(value);
          } else {
            perms_ = value;
          }
        } else {
          permsBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * required .hadoop.hdfs.FsPermissionProto perms = 6;
       */
      public Builder clearPerms() {
        bitField0_ = (bitField0_ & ~0x00000020);
        perms_ = null;
        if (permsBuilder_ != null) {
          permsBuilder_.dispose();
          permsBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * required .hadoop.hdfs.FsPermissionProto perms = 6;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getPermsBuilder() {
        bitField0_ |= 0x00000020;
        onChanged();
        return getPermsFieldBuilder().getBuilder();
      }
      /**
       * required .hadoop.hdfs.FsPermissionProto perms = 6;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermsOrBuilder() {
        if (permsBuilder_ != null) {
          return permsBuilder_.getMessageOrBuilder();
        } else {
          return perms_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : perms_;
        }
      }
      /**
       * required .hadoop.hdfs.FsPermissionProto perms = 6;
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> 
          getPermsFieldBuilder() {
        if (permsBuilder_ == null) {
          permsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>(
                  getPerms(),
                  getParentForChildren(),
                  isClean());
          perms_ = null;
        }
        return permsBuilder_;
      }
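
      // Editor's note: getPermsFieldBuilder() creates the SingleFieldBuilderV3
      // lazily. Until it exists, the plain perms_ field backs getPerms(); once
      // created, the builder takes ownership and perms_ is nulled out, which
      // is why every accessor above checks permsBuilder_ for null first.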

      private int replication_ ;
      /**
       * optional int32 replication = 7;
       * @return Whether the replication field is set.
       */
      @java.lang.Override
      public boolean hasReplication() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * optional int32 replication = 7;
       * @return The replication.
       */
      @java.lang.Override
      public int getReplication() {
        return replication_;
      }
      /**
       * optional int32 replication = 7;
       * @param value The replication to set.
       * @return This builder for chaining.
       */
      public Builder setReplication(int value) {
        
        replication_ = value;
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * optional int32 replication = 7;
       * @return This builder for chaining.
       */
      public Builder clearReplication() {
        bitField0_ = (bitField0_ & ~0x00000040);
        replication_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object symlinkTarget_ = "";
      /**
       * optional string symlinkTarget = 8;
       * @return Whether the symlinkTarget field is set.
       */
      public boolean hasSymlinkTarget() {
        return ((bitField0_ & 0x00000080) != 0);
      }
      /**
       * optional string symlinkTarget = 8;
       * @return The symlinkTarget.
       */
      public java.lang.String getSymlinkTarget() {
        java.lang.Object ref = symlinkTarget_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            symlinkTarget_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * optional string symlinkTarget = 8;
       * @return The bytes for symlinkTarget.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getSymlinkTargetBytes() {
        java.lang.Object ref = symlinkTarget_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          symlinkTarget_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * optional string symlinkTarget = 8;
       * @param value The symlinkTarget to set.
       * @return This builder for chaining.
       */
      public Builder setSymlinkTarget(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        symlinkTarget_ = value;
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * optional string symlinkTarget = 8;
       * @return This builder for chaining.
       */
      public Builder clearSymlinkTarget() {
        symlinkTarget_ = getDefaultInstance().getSymlinkTarget();
        bitField0_ = (bitField0_ & ~0x00000080);
        onChanged();
        return this;
      }
      /**
       * optional string symlinkTarget = 8;
       * @param value The bytes for symlinkTarget to set.
       * @return This builder for chaining.
       */
      public Builder setSymlinkTargetBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        symlinkTarget_ = value;
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }

      private boolean overwrite_ ;
      /**
       * optional bool overwrite = 9;
       * @return Whether the overwrite field is set.
       */
      @java.lang.Override
      public boolean hasOverwrite() {
        return ((bitField0_ & 0x00000100) != 0);
      }
      /**
       * optional bool overwrite = 9;
       * @return The overwrite.
       */
      @java.lang.Override
      public boolean getOverwrite() {
        return overwrite_;
      }
      /**
       * optional bool overwrite = 9;
       * @param value The overwrite to set.
       * @return This builder for chaining.
       */
      public Builder setOverwrite(boolean value) {
        
        overwrite_ = value;
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      /**
       * optional bool overwrite = 9;
       * @return This builder for chaining.
       */
      public Builder clearOverwrite() {
        bitField0_ = (bitField0_ & ~0x00000100);
        overwrite_ = false;
        onChanged();
        return this;
      }

      private long defaultBlockSize_ ;
      /**
       * optional int64 defaultBlockSize = 10 [default = 0];
       * @return Whether the defaultBlockSize field is set.
       */
      @java.lang.Override
      public boolean hasDefaultBlockSize() {
        return ((bitField0_ & 0x00000200) != 0);
      }
      /**
       * optional int64 defaultBlockSize = 10 [default = 0];
       * @return The defaultBlockSize.
       */
      @java.lang.Override
      public long getDefaultBlockSize() {
        return defaultBlockSize_;
      }
      /**
       * optional int64 defaultBlockSize = 10 [default = 0];
       * @param value The defaultBlockSize to set.
       * @return This builder for chaining.
       */
      public Builder setDefaultBlockSize(long value) {
        
        defaultBlockSize_ = value;
        bitField0_ |= 0x00000200;
        onChanged();
        return this;
      }
      /**
       * optional int64 defaultBlockSize = 10 [default = 0];
       * @return This builder for chaining.
       */
      public Builder clearDefaultBlockSize() {
        bitField0_ = (bitField0_ & ~0x00000200);
        defaultBlockSize_ = 0L;
        onChanged();
        return this;
      }

      private boolean erasureCoded_ ;
      /**
       * optional bool erasureCoded = 11;
       * @return Whether the erasureCoded field is set.
       */
      @java.lang.Override
      public boolean hasErasureCoded() {
        return ((bitField0_ & 0x00000400) != 0);
      }
      /**
       * optional bool erasureCoded = 11;
       * @return The erasureCoded.
       */
      @java.lang.Override
      public boolean getErasureCoded() {
        return erasureCoded_;
      }
      /**
       * optional bool erasureCoded = 11;
       * @param value The erasureCoded to set.
       * @return This builder for chaining.
       */
      public Builder setErasureCoded(boolean value) {
        
        erasureCoded_ = value;
        bitField0_ |= 0x00000400;
        onChanged();
        return this;
      }
      /**
       * optional bool erasureCoded = 11;
       * @return This builder for chaining.
       */
      public Builder clearErasureCoded() {
        bitField0_ = (bitField0_ & ~0x00000400);
        erasureCoded_ = false;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CreateEventProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.CreateEventProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CreateEventProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CreateEventProto>() {
      @java.lang.Override
      public CreateEventProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CreateEventProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CreateEventProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CreateEventProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
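
  // Editor's note: a minimal usage sketch for the generated CreateEventProto,
  // kept as a comment so this generated file stays untouched. Paths, owner,
  // group, and permission bits below are hypothetical; build() throws if any
  // required field (type, path, ctime, ownerName, groupName, perms) is unset.
  //
  //   InotifyProtos.CreateEventProto ev =
  //       InotifyProtos.CreateEventProto.newBuilder()
  //           .setType(InotifyProtos.INodeType.I_TYPE_FILE)
  //           .setPath("/tmp/example")                     // hypothetical
  //           .setCtime(System.currentTimeMillis())
  //           .setOwnerName("hdfs")                        // hypothetical
  //           .setGroupName("supergroup")                  // hypothetical
  //           .setPerms(org.apache.hadoop.hdfs.protocol.proto.AclProtos
  //               .FsPermissionProto.newBuilder().setPerm(0644).build())
  //           .build();
  //   InotifyProtos.CreateEventProto parsed =
  //       InotifyProtos.CreateEventProto.parseFrom(ev.toByteArray());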

  public interface CloseEventProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.CloseEventProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * required string path = 1;
     * @return Whether the path field is set.
     */
    boolean hasPath();
    /**
     * required string path = 1;
     * @return The path.
     */
    java.lang.String getPath();
    /**
     * required string path = 1;
     * @return The bytes for path.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes();

    /**
     * required int64 fileSize = 2;
     * @return Whether the fileSize field is set.
     */
    boolean hasFileSize();
    /**
     * required int64 fileSize = 2;
     * @return The fileSize.
     */
    long getFileSize();

    /**
     * required int64 timestamp = 3;
     * @return Whether the timestamp field is set.
     */
    boolean hasTimestamp();
    /**
     * required int64 timestamp = 3;
     * @return The timestamp.
     */
    long getTimestamp();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.CloseEventProto}
   */
  public static final class CloseEventProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.CloseEventProto)
      CloseEventProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use CloseEventProto.newBuilder() to construct.
    private CloseEventProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CloseEventProto() {
      path_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CloseEventProto();
    }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
    getUnknownFields() {
      return this.unknownFields;
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_CloseEventProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_CloseEventProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto.Builder.class);
    }

    private int bitField0_;
    public static final int PATH_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object path_ = "";
    /**
     * required string path = 1;
     * @return Whether the path field is set.
     */
    @java.lang.Override
    public boolean hasPath() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * required string path = 1;
     * @return The path.
     */
    @java.lang.Override
    public java.lang.String getPath() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          path_ = s;
        }
        return s;
      }
    }
    /**
     * required string path = 1;
     * @return The bytes for path.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        path_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int FILESIZE_FIELD_NUMBER = 2;
    private long fileSize_ = 0L;
    /**
     * required int64 fileSize = 2;
     * @return Whether the fileSize field is set.
     */
    @java.lang.Override
    public boolean hasFileSize() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * required int64 fileSize = 2;
     * @return The fileSize.
     */
    @java.lang.Override
    public long getFileSize() {
      return fileSize_;
    }

    public static final int TIMESTAMP_FIELD_NUMBER = 3;
    private long timestamp_ = 0L;
    /**
     * required int64 timestamp = 3;
     * @return Whether the timestamp field is set.
     */
    @java.lang.Override
    public boolean hasTimestamp() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * required int64 timestamp = 3;
     * @return The timestamp.
     */
    @java.lang.Override
    public long getTimestamp() {
      return timestamp_;
    }

    private byte memoizedIsInitialized = -1;
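    // Editor's note: memoizedIsInitialized caches the required-field check
    // below: -1 means not yet computed, 0 means some required field is
    // missing, 1 means path, fileSize and timestamp are all present.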
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasPath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasFileSize()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasTimestamp()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeInt64(2, fileSize_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeInt64(3, timestamp_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(2, fileSize_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(3, timestamp_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto other = (org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto) obj;

      if (hasPath() != other.hasPath()) return false;
      if (hasPath()) {
        if (!getPath()
            .equals(other.getPath())) return false;
      }
      if (hasFileSize() != other.hasFileSize()) return false;
      if (hasFileSize()) {
        if (getFileSize()
            != other.getFileSize()) return false;
      }
      if (hasTimestamp() != other.hasTimestamp()) return false;
      if (hasTimestamp()) {
        if (getTimestamp()
            != other.getTimestamp()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasPath()) {
        hash = (37 * hash) + PATH_FIELD_NUMBER;
        hash = (53 * hash) + getPath().hashCode();
      }
      if (hasFileSize()) {
        hash = (37 * hash) + FILESIZE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getFileSize());
      }
      if (hasTimestamp()) {
        hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getTimestamp());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.CloseEventProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.CloseEventProto)
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_CloseEventProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_CloseEventProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        path_ = "";
        fileSize_ = 0L;
        timestamp_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_CloseEventProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto build() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto result = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.path_ = path_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.fileSize_ = fileSize_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.timestamp_ = timestamp_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }
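
      // Editor's note: buildPartial0 copies each builder field guarded by its
      // bit (0x1 = path, 0x2 = fileSize, 0x4 = timestamp) and ORs the same
      // mask into the message's bitField0_, so has*() reflects what was set.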

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto.getDefaultInstance()) return this;
        if (other.hasPath()) {
          path_ = other.path_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasFileSize()) {
          setFileSize(other.getFileSize());
        }
        if (other.hasTimestamp()) {
          setTimestamp(other.getTimestamp());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasPath()) {
          return false;
        }
        if (!hasFileSize()) {
          return false;
        }
        if (!hasTimestamp()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                path_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                fileSize_ = input.readInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                timestamp_ = input.readInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object path_ = "";
      /**
       * required string path = 1;
       * @return Whether the path field is set.
       */
      public boolean hasPath() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * required string path = 1;
       * @return The path.
       */
      public java.lang.String getPath() {
        java.lang.Object ref = path_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            path_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string path = 1;
       * @return The bytes for path.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getPathBytes() {
        java.lang.Object ref = path_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          path_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * required string path = 1;
       * @param value The path to set.
       * @return This builder for chaining.
       */
      public Builder setPath(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * required string path = 1;
       * @return This builder for chaining.
       */
      public Builder clearPath() {
        path_ = getDefaultInstance().getPath();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * required string path = 1;
       * @param value The bytes for path to set.
       * @return This builder for chaining.
       */
      public Builder setPathBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private long fileSize_ ;
      /**
       * required int64 fileSize = 2;
       * @return Whether the fileSize field is set.
       */
      @java.lang.Override
      public boolean hasFileSize() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * required int64 fileSize = 2;
       * @return The fileSize.
       */
      @java.lang.Override
      public long getFileSize() {
        return fileSize_;
      }
      /**
       * required int64 fileSize = 2;
       * @param value The fileSize to set.
       * @return This builder for chaining.
       */
      public Builder setFileSize(long value) {
        
        fileSize_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * required int64 fileSize = 2;
       * @return This builder for chaining.
       */
      public Builder clearFileSize() {
        bitField0_ = (bitField0_ & ~0x00000002);
        fileSize_ = 0L;
        onChanged();
        return this;
      }

      private long timestamp_ ;
      /**
       * required int64 timestamp = 3;
       * @return Whether the timestamp field is set.
       */
      @java.lang.Override
      public boolean hasTimestamp() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * required int64 timestamp = 3;
       * @return The timestamp.
       */
      @java.lang.Override
      public long getTimestamp() {
        return timestamp_;
      }
      /**
       * required int64 timestamp = 3;
       * @param value The timestamp to set.
       * @return This builder for chaining.
       */
      public Builder setTimestamp(long value) {
        
        timestamp_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * required int64 timestamp = 3;
       * @return This builder for chaining.
       */
      public Builder clearTimestamp() {
        bitField0_ = (bitField0_ & ~0x00000004);
        timestamp_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CloseEventProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.CloseEventProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CloseEventProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CloseEventProto>() {
      @java.lang.Override
      public CloseEventProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CloseEventProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CloseEventProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
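
  // Editor's note: a hedged round-trip sketch for CloseEventProto (all three
  // fields are required; the values below are hypothetical), again left as a
  // comment to keep the generated source intact.
  //
  //   InotifyProtos.CloseEventProto close =
  //       InotifyProtos.CloseEventProto.newBuilder()
  //           .setPath("/tmp/example")              // hypothetical path
  //           .setFileSize(1024L)
  //           .setTimestamp(System.currentTimeMillis())
  //           .build();
  //   InotifyProtos.CloseEventProto roundTrip =
  //       InotifyProtos.CloseEventProto.parseFrom(close.toByteArray());
  //   assert roundTrip.equals(close);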

  public interface TruncateEventProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.TruncateEventProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * required string path = 1;
     * @return Whether the path field is set.
     */
    boolean hasPath();
    /**
     * required string path = 1;
     * @return The path.
     */
    java.lang.String getPath();
    /**
     * required string path = 1;
     * @return The bytes for path.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes();

    /**
     * required int64 fileSize = 2;
     * @return Whether the fileSize field is set.
     */
    boolean hasFileSize();
    /**
     * required int64 fileSize = 2;
     * @return The fileSize.
     */
    long getFileSize();

    /**
     * required int64 timestamp = 3;
     * @return Whether the timestamp field is set.
     */
    boolean hasTimestamp();
    /**
     * required int64 timestamp = 3;
     * @return The timestamp.
     */
    long getTimestamp();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.TruncateEventProto}
   */
  public static final class TruncateEventProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.TruncateEventProto)
      TruncateEventProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use TruncateEventProto.newBuilder() to construct.
    private TruncateEventProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private TruncateEventProto() {
      path_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new TruncateEventProto();
    }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
    getUnknownFields() {
      return this.unknownFields;
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_TruncateEventProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_TruncateEventProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto.Builder.class);
    }

    private int bitField0_;
    public static final int PATH_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object path_ = "";
    /**
     * required string path = 1;
     * @return Whether the path field is set.
     */
    @java.lang.Override
    public boolean hasPath() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * required string path = 1;
     * @return The path.
     */
    @java.lang.Override
    public java.lang.String getPath() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          path_ = s;
        }
        return s;
      }
    }
    /**
     * required string path = 1;
     * @return The bytes for path.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        path_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int FILESIZE_FIELD_NUMBER = 2;
    private long fileSize_ = 0L;
    /**
     * required int64 fileSize = 2;
     * @return Whether the fileSize field is set.
     */
    @java.lang.Override
    public boolean hasFileSize() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * required int64 fileSize = 2;
     * @return The fileSize.
     */
    @java.lang.Override
    public long getFileSize() {
      return fileSize_;
    }

    public static final int TIMESTAMP_FIELD_NUMBER = 3;
    private long timestamp_ = 0L;
    /**
     * required int64 timestamp = 3;
     * @return Whether the timestamp field is set.
     */
    @java.lang.Override
    public boolean hasTimestamp() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * required int64 timestamp = 3;
     * @return The timestamp.
     */
    @java.lang.Override
    public long getTimestamp() {
      return timestamp_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasPath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasFileSize()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasTimestamp()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeInt64(2, fileSize_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeInt64(3, timestamp_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(2, fileSize_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(3, timestamp_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto other = (org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto) obj;

      if (hasPath() != other.hasPath()) return false;
      if (hasPath()) {
        if (!getPath()
            .equals(other.getPath())) return false;
      }
      if (hasFileSize() != other.hasFileSize()) return false;
      if (hasFileSize()) {
        if (getFileSize()
            != other.getFileSize()) return false;
      }
      if (hasTimestamp() != other.hasTimestamp()) return false;
      if (hasTimestamp()) {
        if (getTimestamp()
            != other.getTimestamp()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasPath()) {
        hash = (37 * hash) + PATH_FIELD_NUMBER;
        hash = (53 * hash) + getPath().hashCode();
      }
      if (hasFileSize()) {
        hash = (37 * hash) + FILESIZE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getFileSize());
      }
      if (hasTimestamp()) {
        hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getTimestamp());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.TruncateEventProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.TruncateEventProto)
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_TruncateEventProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_TruncateEventProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        path_ = "";
        fileSize_ = 0L;
        timestamp_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_TruncateEventProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto build() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto result = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto result) {
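        // Copy each field whose presence bit is set in the builder into the
        // message, then OR the collected bits into result.bitField0_ so the
        // message's has*() accessors reflect exactly what was set.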
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.path_ = path_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.fileSize_ = fileSize_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.timestamp_ = timestamp_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto.getDefaultInstance()) return this;
        if (other.hasPath()) {
          path_ = other.path_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasFileSize()) {
          setFileSize(other.getFileSize());
        }
        if (other.hasTimestamp()) {
          setTimestamp(other.getTimestamp());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasPath()) {
          return false;
        }
        if (!hasFileSize()) {
          return false;
        }
        if (!hasTimestamp()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
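            // A protobuf tag packs (fieldNumber << 3) | wireType: field 1
            // (length-delimited string, wire type 2) arrives as tag 10, and
            // fields 2 and 3 (varint int64s, wire type 0) as tags 16 and 24.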
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                path_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                fileSize_ = input.readInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                timestamp_ = input.readInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object path_ = "";
      /**
       * required string path = 1;
       * @return Whether the path field is set.
       */
      public boolean hasPath() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * required string path = 1;
       * @return The path.
       */
      public java.lang.String getPath() {
        java.lang.Object ref = path_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            path_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string path = 1;
       * @return The bytes for path.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getPathBytes() {
        java.lang.Object ref = path_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          path_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * required string path = 1;
       * @param value The path to set.
       * @return This builder for chaining.
       */
      public Builder setPath(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * required string path = 1;
       * @return This builder for chaining.
       */
      public Builder clearPath() {
        path_ = getDefaultInstance().getPath();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * required string path = 1;
       * @param value The bytes for path to set.
       * @return This builder for chaining.
       */
      public Builder setPathBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private long fileSize_ ;
      /**
       * required int64 fileSize = 2;
       * @return Whether the fileSize field is set.
       */
      @java.lang.Override
      public boolean hasFileSize() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * required int64 fileSize = 2;
       * @return The fileSize.
       */
      @java.lang.Override
      public long getFileSize() {
        return fileSize_;
      }
      /**
       * required int64 fileSize = 2;
       * @param value The fileSize to set.
       * @return This builder for chaining.
       */
      public Builder setFileSize(long value) {
        fileSize_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * required int64 fileSize = 2;
       * @return This builder for chaining.
       */
      public Builder clearFileSize() {
        bitField0_ = (bitField0_ & ~0x00000002);
        fileSize_ = 0L;
        onChanged();
        return this;
      }

      private long timestamp_ ;
      /**
       * required int64 timestamp = 3;
       * @return Whether the timestamp field is set.
       */
      @java.lang.Override
      public boolean hasTimestamp() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * required int64 timestamp = 3;
       * @return The timestamp.
       */
      @java.lang.Override
      public long getTimestamp() {
        return timestamp_;
      }
      /**
       * required int64 timestamp = 3;
       * @param value The timestamp to set.
       * @return This builder for chaining.
       */
      public Builder setTimestamp(long value) {
        timestamp_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * required int64 timestamp = 3;
       * @return This builder for chaining.
       */
      public Builder clearTimestamp() {
        bitField0_ = (bitField0_ & ~0x00000004);
        timestamp_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.TruncateEventProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.TruncateEventProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<TruncateEventProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<TruncateEventProto>() {
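      // parsePartialFrom returns buildPartial() rather than build(), so a
      // message that fails mid-parse can still be attached to the thrown
      // InvalidProtocolBufferException via setUnfinishedMessage().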
      @java.lang.Override
      public TruncateEventProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<TruncateEventProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<TruncateEventProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.TruncateEventProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
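
  // A minimal, illustrative usage sketch for TruncateEventProto (not part of
  // the generated output; the path and values below are hypothetical). All
  // three fields are required, so build() throws if any of them is unset:
  //
  //   InotifyProtos.TruncateEventProto event =
  //       InotifyProtos.TruncateEventProto.newBuilder()
  //           .setPath("/user/example/data.txt")        // hypothetical path
  //           .setFileSize(1024L)                       // post-truncate length
  //           .setTimestamp(System.currentTimeMillis())
  //           .build();
  //   byte[] wire = event.toByteArray();
  //   InotifyProtos.TruncateEventProto parsed =
  //       InotifyProtos.TruncateEventProto.parseFrom(wire);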

  public interface AppendEventProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.AppendEventProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * required string path = 1;
     * @return Whether the path field is set.
     */
    boolean hasPath();
    /**
     * required string path = 1;
     * @return The path.
     */
    java.lang.String getPath();
    /**
     * required string path = 1;
     * @return The bytes for path.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes();

    /**
     * optional bool newBlock = 2 [default = false];
     * @return Whether the newBlock field is set.
     */
    boolean hasNewBlock();
    /**
     * optional bool newBlock = 2 [default = false];
     * @return The newBlock.
     */
    boolean getNewBlock();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.AppendEventProto}
   */
  public static final class AppendEventProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.AppendEventProto)
      AppendEventProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use AppendEventProto.newBuilder() to construct.
    private AppendEventProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private AppendEventProto() {
      path_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new AppendEventProto();
    }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
    getUnknownFields() {
      return this.unknownFields;
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_AppendEventProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_AppendEventProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto.Builder.class);
    }

    private int bitField0_;
    public static final int PATH_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object path_ = "";
    /**
     * required string path = 1;
     * @return Whether the path field is set.
     */
    @java.lang.Override
    public boolean hasPath() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * required string path = 1;
     * @return The path.
     */
    @java.lang.Override
    public java.lang.String getPath() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          path_ = s;
        }
        return s;
      }
    }
    /**
     * required string path = 1;
     * @return The bytes for path.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        path_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int NEWBLOCK_FIELD_NUMBER = 2;
    private boolean newBlock_ = false;
    /**
     * optional bool newBlock = 2 [default = false];
     * @return Whether the newBlock field is set.
     */
    @java.lang.Override
    public boolean hasNewBlock() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * optional bool newBlock = 2 [default = false];
     * @return The newBlock.
     */
    @java.lang.Override
    public boolean getNewBlock() {
      return newBlock_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasPath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeBool(2, newBlock_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(2, newBlock_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto other = (org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto) obj;

      if (hasPath() != other.hasPath()) return false;
      if (hasPath()) {
        if (!getPath()
            .equals(other.getPath())) return false;
      }
      if (hasNewBlock() != other.hasNewBlock()) return false;
      if (hasNewBlock()) {
        if (getNewBlock()
            != other.getNewBlock()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasPath()) {
        hash = (37 * hash) + PATH_FIELD_NUMBER;
        hash = (53 * hash) + getPath().hashCode();
      }
      if (hasNewBlock()) {
        hash = (37 * hash) + NEWBLOCK_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getNewBlock());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.AppendEventProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.AppendEventProto)
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_AppendEventProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_AppendEventProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        path_ = "";
        newBlock_ = false;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_AppendEventProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto build() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto result = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.path_ = path_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.newBlock_ = newBlock_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto.getDefaultInstance()) return this;
        if (other.hasPath()) {
          path_ = other.path_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasNewBlock()) {
          setNewBlock(other.getNewBlock());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasPath()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                path_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                newBlock_ = input.readBool();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object path_ = "";
      /**
       * required string path = 1;
       * @return Whether the path field is set.
       */
      public boolean hasPath() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * required string path = 1;
       * @return The path.
       */
      public java.lang.String getPath() {
        java.lang.Object ref = path_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            path_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string path = 1;
       * @return The bytes for path.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getPathBytes() {
        java.lang.Object ref = path_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          path_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * required string path = 1;
       * @param value The path to set.
       * @return This builder for chaining.
       */
      public Builder setPath(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * required string path = 1;
       * @return This builder for chaining.
       */
      public Builder clearPath() {
        path_ = getDefaultInstance().getPath();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * required string path = 1;
       * @param value The bytes for path to set.
       * @return This builder for chaining.
       */
      public Builder setPathBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private boolean newBlock_ ;
      /**
       * optional bool newBlock = 2 [default = false];
       * @return Whether the newBlock field is set.
       */
      @java.lang.Override
      public boolean hasNewBlock() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * optional bool newBlock = 2 [default = false];
       * @return The newBlock.
       */
      @java.lang.Override
      public boolean getNewBlock() {
        return newBlock_;
      }
      /**
       * optional bool newBlock = 2 [default = false];
       * @param value The newBlock to set.
       * @return This builder for chaining.
       */
      public Builder setNewBlock(boolean value) {
        newBlock_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * optional bool newBlock = 2 [default = false];
       * @return This builder for chaining.
       */
      public Builder clearNewBlock() {
        bitField0_ = (bitField0_ & ~0x00000002);
        newBlock_ = false;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AppendEventProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.AppendEventProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<AppendEventProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<AppendEventProto>() {
      @java.lang.Override
      public AppendEventProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<AppendEventProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<AppendEventProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.AppendEventProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
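
  // A minimal, illustrative usage sketch for AppendEventProto (not part of the
  // generated output; the path is hypothetical). Only path is required;
  // newBlock is optional with [default = false], so an instance built without
  // it reports hasNewBlock() == false while getNewBlock() returns the default:
  //
  //   InotifyProtos.AppendEventProto append =
  //       InotifyProtos.AppendEventProto.newBuilder()
  //           .setPath("/user/example/log.txt")         // hypothetical path
  //           .build();
  //   boolean explicit = append.hasNewBlock();          // false
  //   boolean newBlock = append.getNewBlock();          // false (default)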

  public interface RenameEventProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.RenameEventProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * required string srcPath = 1;
     * @return Whether the srcPath field is set.
     */
    boolean hasSrcPath();
    /**
     * required string srcPath = 1;
     * @return The srcPath.
     */
    java.lang.String getSrcPath();
    /**
     * required string srcPath = 1;
     * @return The bytes for srcPath.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getSrcPathBytes();

    /**
     * required string destPath = 2;
     * @return Whether the destPath field is set.
     */
    boolean hasDestPath();
    /**
     * required string destPath = 2;
     * @return The destPath.
     */
    java.lang.String getDestPath();
    /**
     * required string destPath = 2;
     * @return The bytes for destPath.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getDestPathBytes();

    /**
     * required int64 timestamp = 3;
     * @return Whether the timestamp field is set.
     */
    boolean hasTimestamp();
    /**
     * required int64 timestamp = 3;
     * @return The timestamp.
     */
    long getTimestamp();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.RenameEventProto}
   */
  public static final class RenameEventProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.RenameEventProto)
      RenameEventProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use RenameEventProto.newBuilder() to construct.
    private RenameEventProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private RenameEventProto() {
      srcPath_ = "";
      destPath_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new RenameEventProto();
    }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
    getUnknownFields() {
      return this.unknownFields;
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_RenameEventProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_RenameEventProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto.Builder.class);
    }

    private int bitField0_;
    public static final int SRCPATH_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object srcPath_ = "";
    /**
     * required string srcPath = 1;
     * @return Whether the srcPath field is set.
     */
    @java.lang.Override
    public boolean hasSrcPath() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * required string srcPath = 1;
     * @return The srcPath.
     */
    @java.lang.Override
    public java.lang.String getSrcPath() {
      java.lang.Object ref = srcPath_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          srcPath_ = s;
        }
        return s;
      }
    }
    /**
     * required string srcPath = 1;
     * @return The bytes for srcPath.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getSrcPathBytes() {
      java.lang.Object ref = srcPath_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        srcPath_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int DESTPATH_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object destPath_ = "";
    /**
     * required string destPath = 2;
     * @return Whether the destPath field is set.
     */
    @java.lang.Override
    public boolean hasDestPath() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * required string destPath = 2;
     * @return The destPath.
     */
    @java.lang.Override
    public java.lang.String getDestPath() {
      java.lang.Object ref = destPath_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          destPath_ = s;
        }
        return s;
      }
    }
    /**
     * required string destPath = 2;
     * @return The bytes for destPath.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getDestPathBytes() {
      java.lang.Object ref = destPath_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        destPath_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int TIMESTAMP_FIELD_NUMBER = 3;
    private long timestamp_ = 0L;
    /**
     * required int64 timestamp = 3;
     * @return Whether the timestamp field is set.
     */
    @java.lang.Override
    public boolean hasTimestamp() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * required int64 timestamp = 3;
     * @return The timestamp.
     */
    @java.lang.Override
    public long getTimestamp() {
      return timestamp_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasSrcPath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasDestPath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasTimestamp()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, srcPath_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, destPath_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeInt64(3, timestamp_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, srcPath_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, destPath_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(3, timestamp_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto other = (org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto) obj;

      if (hasSrcPath() != other.hasSrcPath()) return false;
      if (hasSrcPath()) {
        if (!getSrcPath()
            .equals(other.getSrcPath())) return false;
      }
      if (hasDestPath() != other.hasDestPath()) return false;
      if (hasDestPath()) {
        if (!getDestPath()
            .equals(other.getDestPath())) return false;
      }
      if (hasTimestamp() != other.hasTimestamp()) return false;
      if (hasTimestamp()) {
        if (getTimestamp()
            != other.getTimestamp()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasSrcPath()) {
        hash = (37 * hash) + SRCPATH_FIELD_NUMBER;
        hash = (53 * hash) + getSrcPath().hashCode();
      }
      if (hasDestPath()) {
        hash = (37 * hash) + DESTPATH_FIELD_NUMBER;
        hash = (53 * hash) + getDestPath().hashCode();
      }
      if (hasTimestamp()) {
        hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getTimestamp());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
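
    // Usage sketch (illustrative only; the paths and timestamp are invented
    // values, not part of the generated API). The overloads above accept
    // byte[], ByteString, ByteBuffer, InputStream, or CodedInputStream, and
    // parseDelimitedFrom() additionally reads a varint length prefix so that
    // several messages can share one stream:
    //
    //   RenameEventProto event = RenameEventProto.newBuilder()
    //       .setSrcPath("/user/alice/old.txt")
    //       .setDestPath("/user/alice/new.txt")
    //       .setTimestamp(System.currentTimeMillis())
    //       .build();
    //   byte[] wire = event.toByteArray();
    //   RenameEventProto parsed = RenameEventProto.parseFrom(wire);
    //   assert parsed.equals(event);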

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.RenameEventProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.RenameEventProto)
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_RenameEventProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_RenameEventProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        srcPath_ = "";
        destPath_ = "";
        timestamp_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_RenameEventProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto build() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto result = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.srcPath_ = srcPath_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.destPath_ = destPath_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.timestamp_ = timestamp_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto.getDefaultInstance()) return this;
        if (other.hasSrcPath()) {
          srcPath_ = other.srcPath_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasDestPath()) {
          destPath_ = other.destPath_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (other.hasTimestamp()) {
          setTimestamp(other.getTimestamp());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasSrcPath()) {
          return false;
        }
        if (!hasDestPath()) {
          return false;
        }
        if (!hasTimestamp()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                srcPath_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                destPath_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 24: {
                timestamp_ = input.readInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
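
      // The case labels above are precomputed wire tags, where
      // tag = (field_number << 3) | wire_type:
      //   10 = (1 << 3) | 2  -> srcPath, length-delimited
      //   18 = (2 << 3) | 2  -> destPath, length-delimited
      //   24 = (3 << 3) | 0  -> timestamp, varint
      // Any other tag is handed to parseUnknownField(), which preserves it in
      // the unknown-field set (or reports an end-group tag by returning false).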
      private int bitField0_;

      private java.lang.Object srcPath_ = "";
      /**
       * required string srcPath = 1;
       * @return Whether the srcPath field is set.
       */
      public boolean hasSrcPath() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * required string srcPath = 1;
       * @return The srcPath.
       */
      public java.lang.String getSrcPath() {
        java.lang.Object ref = srcPath_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            srcPath_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string srcPath = 1;
       * @return The bytes for srcPath.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getSrcPathBytes() {
        java.lang.Object ref = srcPath_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          srcPath_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * required string srcPath = 1;
       * @param value The srcPath to set.
       * @return This builder for chaining.
       */
      public Builder setSrcPath(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        srcPath_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * required string srcPath = 1;
       * @return This builder for chaining.
       */
      public Builder clearSrcPath() {
        srcPath_ = getDefaultInstance().getSrcPath();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * required string srcPath = 1;
       * @param value The bytes for srcPath to set.
       * @return This builder for chaining.
       */
      public Builder setSrcPathBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        srcPath_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private java.lang.Object destPath_ = "";
      /**
       * required string destPath = 2;
       * @return Whether the destPath field is set.
       */
      public boolean hasDestPath() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * required string destPath = 2;
       * @return The destPath.
       */
      public java.lang.String getDestPath() {
        java.lang.Object ref = destPath_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            destPath_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string destPath = 2;
       * @return The bytes for destPath.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getDestPathBytes() {
        java.lang.Object ref = destPath_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          destPath_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * required string destPath = 2;
       * @param value The destPath to set.
       * @return This builder for chaining.
       */
      public Builder setDestPath(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        destPath_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * required string destPath = 2;
       * @return This builder for chaining.
       */
      public Builder clearDestPath() {
        destPath_ = getDefaultInstance().getDestPath();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * required string destPath = 2;
       * @param value The bytes for destPath to set.
       * @return This builder for chaining.
       */
      public Builder setDestPathBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        destPath_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private long timestamp_ ;
      /**
       * required int64 timestamp = 3;
       * @return Whether the timestamp field is set.
       */
      @java.lang.Override
      public boolean hasTimestamp() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * required int64 timestamp = 3;
       * @return The timestamp.
       */
      @java.lang.Override
      public long getTimestamp() {
        return timestamp_;
      }
      /**
       * required int64 timestamp = 3;
       * @param value The timestamp to set.
       * @return This builder for chaining.
       */
      public Builder setTimestamp(long value) {
        
        timestamp_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * required int64 timestamp = 3;
       * @return This builder for chaining.
       */
      public Builder clearTimestamp() {
        bitField0_ = (bitField0_ & ~0x00000004);
        timestamp_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RenameEventProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.RenameEventProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<RenameEventProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<RenameEventProto>() {
      @java.lang.Override
      public RenameEventProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<RenameEventProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<RenameEventProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.RenameEventProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface MetadataUpdateEventProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.MetadataUpdateEventProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * required string path = 1;
     * @return Whether the path field is set.
     */
    boolean hasPath();
    /**
     * required string path = 1;
     * @return The path.
     */
    java.lang.String getPath();
    /**
     * required string path = 1;
     * @return The bytes for path.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes();

    /**
     * required .hadoop.hdfs.MetadataUpdateType type = 2;
     * @return Whether the type field is set.
     */
    boolean hasType();
    /**
     * required .hadoop.hdfs.MetadataUpdateType type = 2;
     * @return The type.
     */
    org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateType getType();

    /**
     * optional int64 mtime = 3;
     * @return Whether the mtime field is set.
     */
    boolean hasMtime();
    /**
     * optional int64 mtime = 3;
     * @return The mtime.
     */
    long getMtime();

    /**
     * optional int64 atime = 4;
     * @return Whether the atime field is set.
     */
    boolean hasAtime();
    /**
     * optional int64 atime = 4;
     * @return The atime.
     */
    long getAtime();

    /**
     * optional int32 replication = 5;
     * @return Whether the replication field is set.
     */
    boolean hasReplication();
    /**
     * optional int32 replication = 5;
     * @return The replication.
     */
    int getReplication();

    /**
     * optional string ownerName = 6;
     * @return Whether the ownerName field is set.
     */
    boolean hasOwnerName();
    /**
     * optional string ownerName = 6;
     * @return The ownerName.
     */
    java.lang.String getOwnerName();
    /**
     * optional string ownerName = 6;
     * @return The bytes for ownerName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getOwnerNameBytes();

    /**
     * optional string groupName = 7;
     * @return Whether the groupName field is set.
     */
    boolean hasGroupName();
    /**
     * optional string groupName = 7;
     * @return The groupName.
     */
    java.lang.String getGroupName();
    /**
     * optional string groupName = 7;
     * @return The bytes for groupName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getGroupNameBytes();

    /**
     * optional .hadoop.hdfs.FsPermissionProto perms = 8;
     * @return Whether the perms field is set.
     */
    boolean hasPerms();
    /**
     * optional .hadoop.hdfs.FsPermissionProto perms = 8;
     * @return The perms.
     */
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPerms();
    /**
     * optional .hadoop.hdfs.FsPermissionProto perms = 8;
     */
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermsOrBuilder();

    /**
     * repeated .hadoop.hdfs.AclEntryProto acls = 9;
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto>
        getAclsList();
    /**
     * repeated .hadoop.hdfs.AclEntryProto acls = 9;
     */
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto getAcls(int index);
    /**
     * repeated .hadoop.hdfs.AclEntryProto acls = 9;
     */
    int getAclsCount();
    /**
     * repeated .hadoop.hdfs.AclEntryProto acls = 9;
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProtoOrBuilder>
        getAclsOrBuilderList();
    /**
     * repeated .hadoop.hdfs.AclEntryProto acls = 9;
     */
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProtoOrBuilder getAclsOrBuilder(
        int index);

    /**
     * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto>
        getXAttrsList();
    /**
     * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
     */
    org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto getXAttrs(int index);
    /**
     * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
     */
    int getXAttrsCount();
    /**
     * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProtoOrBuilder>
        getXAttrsOrBuilderList();
    /**
     * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
     */
    org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProtoOrBuilder getXAttrsOrBuilder(
        int index);

    /**
     * optional bool xAttrsRemoved = 11;
     * @return Whether the xAttrsRemoved field is set.
     */
    boolean hasXAttrsRemoved();
    /**
     * optional bool xAttrsRemoved = 11;
     * @return The xAttrsRemoved.
     */
    boolean getXAttrsRemoved();
  }
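
  // Field map for the message below, as declared in inotify.proto: path (1)
  // and type (2) are required; mtime (3), atime (4), replication (5),
  // ownerName (6), groupName (7), perms (8), and xAttrsRemoved (11) are
  // optional; acls (9) and xAttrs (10) are repeated. In practice, which
  // optional fields are populated tracks the MetadataUpdateType of the event
  // (times, replication, owner, perms, ACLs, or xattrs).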
  /**
   * Protobuf type {@code hadoop.hdfs.MetadataUpdateEventProto}
   */
  public static final class MetadataUpdateEventProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.MetadataUpdateEventProto)
      MetadataUpdateEventProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use MetadataUpdateEventProto.newBuilder() to construct.
    private MetadataUpdateEventProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private MetadataUpdateEventProto() {
      path_ = "";
      type_ = 0;
      ownerName_ = "";
      groupName_ = "";
      acls_ = java.util.Collections.emptyList();
      xAttrs_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new MetadataUpdateEventProto();
    }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
    getUnknownFields() {
      return this.unknownFields;
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_MetadataUpdateEventProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_MetadataUpdateEventProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto.Builder.class);
    }

    private int bitField0_;
    public static final int PATH_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object path_ = "";
    /**
     * required string path = 1;
     * @return Whether the path field is set.
     */
    @java.lang.Override
    public boolean hasPath() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * required string path = 1;
     * @return The path.
     */
    @java.lang.Override
    public java.lang.String getPath() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          path_ = s;
        }
        return s;
      }
    }
    /**
     * required string path = 1;
     * @return The bytes for path.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        path_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int TYPE_FIELD_NUMBER = 2;
    private int type_ = 0;
    /**
     * required .hadoop.hdfs.MetadataUpdateType type = 2;
     * @return Whether the type field is set.
     */
    @java.lang.Override public boolean hasType() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * required .hadoop.hdfs.MetadataUpdateType type = 2;
     * @return The type.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateType getType() {
      org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateType result = org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateType.forNumber(type_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateType.META_TYPE_TIMES : result;
    }
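
    // MetadataUpdateType.forNumber() returns null for a number with no
    // matching constant, so the getter above falls back to META_TYPE_TIMES
    // (the first declared value) instead of returning null. This is defensive:
    // under proto2 rules, an unknown enum number seen on the wire is diverted
    // into the unknown-field set at parse time rather than stored here.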

    public static final int MTIME_FIELD_NUMBER = 3;
    private long mtime_ = 0L;
    /**
     * optional int64 mtime = 3;
     * @return Whether the mtime field is set.
     */
    @java.lang.Override
    public boolean hasMtime() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * optional int64 mtime = 3;
     * @return The mtime.
     */
    @java.lang.Override
    public long getMtime() {
      return mtime_;
    }

    public static final int ATIME_FIELD_NUMBER = 4;
    private long atime_ = 0L;
    /**
     * optional int64 atime = 4;
     * @return Whether the atime field is set.
     */
    @java.lang.Override
    public boolean hasAtime() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * optional int64 atime = 4;
     * @return The atime.
     */
    @java.lang.Override
    public long getAtime() {
      return atime_;
    }

    public static final int REPLICATION_FIELD_NUMBER = 5;
    private int replication_ = 0;
    /**
     * optional int32 replication = 5;
     * @return Whether the replication field is set.
     */
    @java.lang.Override
    public boolean hasReplication() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * optional int32 replication = 5;
     * @return The replication.
     */
    @java.lang.Override
    public int getReplication() {
      return replication_;
    }

    public static final int OWNERNAME_FIELD_NUMBER = 6;
    @SuppressWarnings("serial")
    private volatile java.lang.Object ownerName_ = "";
    /**
     * optional string ownerName = 6;
     * @return Whether the ownerName field is set.
     */
    @java.lang.Override
    public boolean hasOwnerName() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * optional string ownerName = 6;
     * @return The ownerName.
     */
    @java.lang.Override
    public java.lang.String getOwnerName() {
      java.lang.Object ref = ownerName_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          ownerName_ = s;
        }
        return s;
      }
    }
    /**
     * optional string ownerName = 6;
     * @return The bytes for ownerName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getOwnerNameBytes() {
      java.lang.Object ref = ownerName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        ownerName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int GROUPNAME_FIELD_NUMBER = 7;
    @SuppressWarnings("serial")
    private volatile java.lang.Object groupName_ = "";
    /**
     * optional string groupName = 7;
     * @return Whether the groupName field is set.
     */
    @java.lang.Override
    public boolean hasGroupName() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * optional string groupName = 7;
     * @return The groupName.
     */
    @java.lang.Override
    public java.lang.String getGroupName() {
      java.lang.Object ref = groupName_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          groupName_ = s;
        }
        return s;
      }
    }
    /**
     * optional string groupName = 7;
     * @return The bytes for groupName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getGroupNameBytes() {
      java.lang.Object ref = groupName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        groupName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int PERMS_FIELD_NUMBER = 8;
    private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto perms_;
    /**
     * optional .hadoop.hdfs.FsPermissionProto perms = 8;
     * @return Whether the perms field is set.
     */
    @java.lang.Override
    public boolean hasPerms() {
      return ((bitField0_ & 0x00000080) != 0);
    }
    /**
     * optional .hadoop.hdfs.FsPermissionProto perms = 8;
     * @return The perms.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPerms() {
      return perms_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : perms_;
    }
    /**
     * optional .hadoop.hdfs.FsPermissionProto perms = 8;
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermsOrBuilder() {
      return perms_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : perms_;
    }

    public static final int ACLS_FIELD_NUMBER = 9;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto> acls_;
    /**
     * repeated .hadoop.hdfs.AclEntryProto acls = 9;
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto> getAclsList() {
      return acls_;
    }
    /**
     * repeated .hadoop.hdfs.AclEntryProto acls = 9;
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProtoOrBuilder>
        getAclsOrBuilderList() {
      return acls_;
    }
    /**
     * repeated .hadoop.hdfs.AclEntryProto acls = 9;
     */
    @java.lang.Override
    public int getAclsCount() {
      return acls_.size();
    }
    /**
     * repeated .hadoop.hdfs.AclEntryProto acls = 9;
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto getAcls(int index) {
      return acls_.get(index);
    }
    /**
     * repeated .hadoop.hdfs.AclEntryProto acls = 9;
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProtoOrBuilder getAclsOrBuilder(
        int index) {
      return acls_.get(index);
    }

    public static final int XATTRS_FIELD_NUMBER = 10;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto> xAttrs_;
    /**
     * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto> getXAttrsList() {
      return xAttrs_;
    }
    /**
     * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProtoOrBuilder>
        getXAttrsOrBuilderList() {
      return xAttrs_;
    }
    /**
     * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
     */
    @java.lang.Override
    public int getXAttrsCount() {
      return xAttrs_.size();
    }
    /**
     * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto getXAttrs(int index) {
      return xAttrs_.get(index);
    }
    /**
     * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProtoOrBuilder getXAttrsOrBuilder(
        int index) {
      return xAttrs_.get(index);
    }

    public static final int XATTRSREMOVED_FIELD_NUMBER = 11;
    private boolean xAttrsRemoved_ = false;
    /**
     * optional bool xAttrsRemoved = 11;
     * @return Whether the xAttrsRemoved field is set.
     */
    @java.lang.Override
    public boolean hasXAttrsRemoved() {
      return ((bitField0_ & 0x00000100) != 0);
    }
    /**
     * optional bool xAttrsRemoved = 11;
     * @return The xAttrsRemoved.
     */
    @java.lang.Override
    public boolean getXAttrsRemoved() {
      return xAttrsRemoved_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasPath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasType()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasPerms()) {
        if (!getPerms().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      for (int i = 0; i < getAclsCount(); i++) {
        if (!getAcls(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      for (int i = 0; i < getXAttrsCount(); i++) {
        if (!getXAttrs(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }
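
    // Initialization is recursive here: beyond the two required scalars (path
    // and type), the message only reports initialized once every nested
    // FsPermissionProto, AclEntryProto, and XAttrProto it carries passes its
    // own isInitialized() check, since those types may declare required
    // fields of their own.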

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeEnum(2, type_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeInt64(3, mtime_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeInt64(4, atime_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeInt32(5, replication_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, ownerName_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 7, groupName_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        output.writeMessage(8, getPerms());
      }
      for (int i = 0; i < acls_.size(); i++) {
        output.writeMessage(9, acls_.get(i));
      }
      for (int i = 0; i < xAttrs_.size(); i++) {
        output.writeMessage(10, xAttrs_.get(i));
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        output.writeBool(11, xAttrsRemoved_);
      }
      getUnknownFields().writeTo(output);
    }
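
    // Presence above is tracked with one bit of bitField0_ per singular field,
    // assigned in declaration order: 0x01 path, 0x02 type, 0x04 mtime,
    // 0x08 atime, 0x10 replication, 0x20 ownerName, 0x40 groupName,
    // 0x80 perms, 0x100 xAttrsRemoved. The repeated fields (acls, xAttrs) need
    // no bit: an empty list already encodes absence, so their loops run
    // unconditionally.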

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(2, type_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(3, mtime_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(4, atime_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt32Size(5, replication_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, ownerName_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(7, groupName_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(8, getPerms());
      }
      for (int i = 0; i < acls_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(9, acls_.get(i));
      }
      for (int i = 0; i < xAttrs_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(10, xAttrs_.get(i));
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(11, xAttrsRemoved_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto other = (org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto) obj;

      if (hasPath() != other.hasPath()) return false;
      if (hasPath()) {
        if (!getPath()
            .equals(other.getPath())) return false;
      }
      if (hasType() != other.hasType()) return false;
      if (hasType()) {
        if (type_ != other.type_) return false;
      }
      if (hasMtime() != other.hasMtime()) return false;
      if (hasMtime()) {
        if (getMtime()
            != other.getMtime()) return false;
      }
      if (hasAtime() != other.hasAtime()) return false;
      if (hasAtime()) {
        if (getAtime()
            != other.getAtime()) return false;
      }
      if (hasReplication() != other.hasReplication()) return false;
      if (hasReplication()) {
        if (getReplication()
            != other.getReplication()) return false;
      }
      if (hasOwnerName() != other.hasOwnerName()) return false;
      if (hasOwnerName()) {
        if (!getOwnerName()
            .equals(other.getOwnerName())) return false;
      }
      if (hasGroupName() != other.hasGroupName()) return false;
      if (hasGroupName()) {
        if (!getGroupName()
            .equals(other.getGroupName())) return false;
      }
      if (hasPerms() != other.hasPerms()) return false;
      if (hasPerms()) {
        if (!getPerms()
            .equals(other.getPerms())) return false;
      }
      if (!getAclsList()
          .equals(other.getAclsList())) return false;
      if (!getXAttrsList()
          .equals(other.getXAttrsList())) return false;
      if (hasXAttrsRemoved() != other.hasXAttrsRemoved()) return false;
      if (hasXAttrsRemoved()) {
        if (getXAttrsRemoved()
            != other.getXAttrsRemoved()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasPath()) {
        hash = (37 * hash) + PATH_FIELD_NUMBER;
        hash = (53 * hash) + getPath().hashCode();
      }
      if (hasType()) {
        hash = (37 * hash) + TYPE_FIELD_NUMBER;
        hash = (53 * hash) + type_;
      }
      if (hasMtime()) {
        hash = (37 * hash) + MTIME_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getMtime());
      }
      if (hasAtime()) {
        hash = (37 * hash) + ATIME_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getAtime());
      }
      if (hasReplication()) {
        hash = (37 * hash) + REPLICATION_FIELD_NUMBER;
        hash = (53 * hash) + getReplication();
      }
      if (hasOwnerName()) {
        hash = (37 * hash) + OWNERNAME_FIELD_NUMBER;
        hash = (53 * hash) + getOwnerName().hashCode();
      }
      if (hasGroupName()) {
        hash = (37 * hash) + GROUPNAME_FIELD_NUMBER;
        hash = (53 * hash) + getGroupName().hashCode();
      }
      if (hasPerms()) {
        hash = (37 * hash) + PERMS_FIELD_NUMBER;
        hash = (53 * hash) + getPerms().hashCode();
      }
      if (getAclsCount() > 0) {
        hash = (37 * hash) + ACLS_FIELD_NUMBER;
        hash = (53 * hash) + getAclsList().hashCode();
      }
      if (getXAttrsCount() > 0) {
        hash = (37 * hash) + XATTRS_FIELD_NUMBER;
        hash = (53 * hash) + getXAttrsList().hashCode();
      }
      if (hasXAttrsRemoved()) {
        hash = (37 * hash) + XATTRSREMOVED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getXAttrsRemoved());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
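
    // Like getSerializedSize(), the hash is computed once and memoized, with 0
    // as the "not yet computed" sentinel. Starting from the seed 41, the prime
    // multipliers (19, 37, 53, 29) fold in the descriptor, each present
    // field's number and value, and the unknown fields, so messages that
    // differ in any populated field hash differently with high probability.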

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.MetadataUpdateEventProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.MetadataUpdateEventProto)
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_MetadataUpdateEventProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_MetadataUpdateEventProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getPermsFieldBuilder();
          getAclsFieldBuilder();
          getXAttrsFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        path_ = "";
        type_ = 0;
        mtime_ = 0L;
        atime_ = 0L;
        replication_ = 0;
        ownerName_ = "";
        groupName_ = "";
        perms_ = null;
        if (permsBuilder_ != null) {
          permsBuilder_.dispose();
          permsBuilder_ = null;
        }
        if (aclsBuilder_ == null) {
          acls_ = java.util.Collections.emptyList();
        } else {
          acls_ = null;
          aclsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000100);
        if (xAttrsBuilder_ == null) {
          xAttrs_ = java.util.Collections.emptyList();
        } else {
          xAttrs_ = null;
          xAttrsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000200);
        xAttrsRemoved_ = false;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_MetadataUpdateEventProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto build() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto result = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto result) {
        if (aclsBuilder_ == null) {
          if (((bitField0_ & 0x00000100) != 0)) {
            acls_ = java.util.Collections.unmodifiableList(acls_);
            bitField0_ = (bitField0_ & ~0x00000100);
          }
          result.acls_ = acls_;
        } else {
          result.acls_ = aclsBuilder_.build();
        }
        if (xAttrsBuilder_ == null) {
          if (((bitField0_ & 0x00000200) != 0)) {
            xAttrs_ = java.util.Collections.unmodifiableList(xAttrs_);
            bitField0_ = (bitField0_ & ~0x00000200);
          }
          result.xAttrs_ = xAttrs_;
        } else {
          result.xAttrs_ = xAttrsBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.path_ = path_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.type_ = type_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.mtime_ = mtime_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.atime_ = atime_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.replication_ = replication_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.ownerName_ = ownerName_;
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.groupName_ = groupName_;
          to_bitField0_ |= 0x00000040;
        }
        if (((from_bitField0_ & 0x00000080) != 0)) {
          result.perms_ = permsBuilder_ == null
              ? perms_
              : permsBuilder_.build();
          to_bitField0_ |= 0x00000080;
        }
        if (((from_bitField0_ & 0x00000400) != 0)) {
          result.xAttrsRemoved_ = xAttrsRemoved_;
          to_bitField0_ |= 0x00000100;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto.getDefaultInstance()) return this;
        if (other.hasPath()) {
          path_ = other.path_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasType()) {
          setType(other.getType());
        }
        if (other.hasMtime()) {
          setMtime(other.getMtime());
        }
        if (other.hasAtime()) {
          setAtime(other.getAtime());
        }
        if (other.hasReplication()) {
          setReplication(other.getReplication());
        }
        if (other.hasOwnerName()) {
          ownerName_ = other.ownerName_;
          bitField0_ |= 0x00000020;
          onChanged();
        }
        if (other.hasGroupName()) {
          groupName_ = other.groupName_;
          bitField0_ |= 0x00000040;
          onChanged();
        }
        if (other.hasPerms()) {
          mergePerms(other.getPerms());
        }
        if (aclsBuilder_ == null) {
          if (!other.acls_.isEmpty()) {
            if (acls_.isEmpty()) {
              acls_ = other.acls_;
              bitField0_ = (bitField0_ & ~0x00000100);
            } else {
              ensureAclsIsMutable();
              acls_.addAll(other.acls_);
            }
            onChanged();
          }
        } else {
          if (!other.acls_.isEmpty()) {
            if (aclsBuilder_.isEmpty()) {
              aclsBuilder_.dispose();
              aclsBuilder_ = null;
              acls_ = other.acls_;
              bitField0_ = (bitField0_ & ~0x00000100);
              aclsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getAclsFieldBuilder() : null;
            } else {
              aclsBuilder_.addAllMessages(other.acls_);
            }
          }
        }
        if (xAttrsBuilder_ == null) {
          if (!other.xAttrs_.isEmpty()) {
            if (xAttrs_.isEmpty()) {
              xAttrs_ = other.xAttrs_;
              bitField0_ = (bitField0_ & ~0x00000200);
            } else {
              ensureXAttrsIsMutable();
              xAttrs_.addAll(other.xAttrs_);
            }
            onChanged();
          }
        } else {
          if (!other.xAttrs_.isEmpty()) {
            if (xAttrsBuilder_.isEmpty()) {
              xAttrsBuilder_.dispose();
              xAttrsBuilder_ = null;
              xAttrs_ = other.xAttrs_;
              bitField0_ = (bitField0_ & ~0x00000200);
              xAttrsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getXAttrsFieldBuilder() : null;
            } else {
              xAttrsBuilder_.addAllMessages(other.xAttrs_);
            }
          }
        }
        if (other.hasXAttrsRemoved()) {
          setXAttrsRemoved(other.getXAttrsRemoved());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasPath()) {
          return false;
        }
        if (!hasType()) {
          return false;
        }
        if (hasPerms()) {
          if (!getPerms().isInitialized()) {
            return false;
          }
        }
        for (int i = 0; i < getAclsCount(); i++) {
          if (!getAcls(i).isInitialized()) {
            return false;
          }
        }
        for (int i = 0; i < getXAttrsCount(); i++) {
          if (!getXAttrs(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }
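
      /*
       * In the stream-parsing mergeFrom below, each case label is a raw
       * protobuf tag: (field_number << 3) | wire_type. So case 10 is
       * field 1 (path, wire type 2 = length-delimited), case 16 is
       * field 2 (type, wire type 0 = varint), case 74 is field 9 (acls,
       * length-delimited), and case 88 is field 11 (xAttrsRemoved,
       * varint); tag 0 marks the end of the stream.
       */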

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                path_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateType tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateType.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(2, tmpRaw);
                } else {
                  type_ = tmpRaw;
                  bitField0_ |= 0x00000002;
                }
                break;
              } // case 16
              case 24: {
                mtime_ = input.readInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                atime_ = input.readInt64();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 40: {
                replication_ = input.readInt32();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 50: {
                ownerName_ = input.readBytes();
                bitField0_ |= 0x00000020;
                break;
              } // case 50
              case 58: {
                groupName_ = input.readBytes();
                bitField0_ |= 0x00000040;
                break;
              } // case 58
              case 66: {
                input.readMessage(
                    getPermsFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000080;
                break;
              } // case 66
              case 74: {
                org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.PARSER,
                        extensionRegistry);
                if (aclsBuilder_ == null) {
                  ensureAclsIsMutable();
                  acls_.add(m);
                } else {
                  aclsBuilder_.addMessage(m);
                }
                break;
              } // case 74
              case 82: {
                org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.PARSER,
                        extensionRegistry);
                if (xAttrsBuilder_ == null) {
                  ensureXAttrsIsMutable();
                  xAttrs_.add(m);
                } else {
                  xAttrsBuilder_.addMessage(m);
                }
                break;
              } // case 82
              case 88: {
                xAttrsRemoved_ = input.readBool();
                bitField0_ |= 0x00000400;
                break;
              } // case 88
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object path_ = "";
      /**
       * required string path = 1;
       * @return Whether the path field is set.
       */
      public boolean hasPath() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * required string path = 1;
       * @return The path.
       */
      public java.lang.String getPath() {
        java.lang.Object ref = path_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            path_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string path = 1;
       * @return The bytes for path.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getPathBytes() {
        java.lang.Object ref = path_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          path_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * required string path = 1;
       * @param value The path to set.
       * @return This builder for chaining.
       */
      public Builder setPath(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * required string path = 1;
       * @return This builder for chaining.
       */
      public Builder clearPath() {
        path_ = getDefaultInstance().getPath();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * required string path = 1;
       * @param value The bytes for path to set.
       * @return This builder for chaining.
       */
      public Builder setPathBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private int type_ = 0;
      /**
       * required .hadoop.hdfs.MetadataUpdateType type = 2;
       * @return Whether the type field is set.
       */
      @java.lang.Override public boolean hasType() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * required .hadoop.hdfs.MetadataUpdateType type = 2;
       * @return The type.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateType getType() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateType result = org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateType.forNumber(type_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateType.META_TYPE_TIMES : result;
      }
      /**
       * required .hadoop.hdfs.MetadataUpdateType type = 2;
       * @param value The type to set.
       * @return This builder for chaining.
       */
      public Builder setType(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateType value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000002;
        type_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * required .hadoop.hdfs.MetadataUpdateType type = 2;
       * @return This builder for chaining.
       */
      public Builder clearType() {
        bitField0_ = (bitField0_ & ~0x00000002);
        type_ = 0;
        onChanged();
        return this;
      }

      private long mtime_ ;
      /**
       * optional int64 mtime = 3;
       * @return Whether the mtime field is set.
       */
      @java.lang.Override
      public boolean hasMtime() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * optional int64 mtime = 3;
       * @return The mtime.
       */
      @java.lang.Override
      public long getMtime() {
        return mtime_;
      }
      /**
       * optional int64 mtime = 3;
       * @param value The mtime to set.
       * @return This builder for chaining.
       */
      public Builder setMtime(long value) {
        
        mtime_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * optional int64 mtime = 3;
       * @return This builder for chaining.
       */
      public Builder clearMtime() {
        bitField0_ = (bitField0_ & ~0x00000004);
        mtime_ = 0L;
        onChanged();
        return this;
      }

      private long atime_ ;
      /**
       * optional int64 atime = 4;
       * @return Whether the atime field is set.
       */
      @java.lang.Override
      public boolean hasAtime() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * optional int64 atime = 4;
       * @return The atime.
       */
      @java.lang.Override
      public long getAtime() {
        return atime_;
      }
      /**
       * optional int64 atime = 4;
       * @param value The atime to set.
       * @return This builder for chaining.
       */
      public Builder setAtime(long value) {
        
        atime_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * optional int64 atime = 4;
       * @return This builder for chaining.
       */
      public Builder clearAtime() {
        bitField0_ = (bitField0_ & ~0x00000008);
        atime_ = 0L;
        onChanged();
        return this;
      }

      private int replication_ ;
      /**
       * optional int32 replication = 5;
       * @return Whether the replication field is set.
       */
      @java.lang.Override
      public boolean hasReplication() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * optional int32 replication = 5;
       * @return The replication.
       */
      @java.lang.Override
      public int getReplication() {
        return replication_;
      }
      /**
       * optional int32 replication = 5;
       * @param value The replication to set.
       * @return This builder for chaining.
       */
      public Builder setReplication(int value) {
        
        replication_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * optional int32 replication = 5;
       * @return This builder for chaining.
       */
      public Builder clearReplication() {
        bitField0_ = (bitField0_ & ~0x00000010);
        replication_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object ownerName_ = "";
      /**
       * optional string ownerName = 6;
       * @return Whether the ownerName field is set.
       */
      public boolean hasOwnerName() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * optional string ownerName = 6;
       * @return The ownerName.
       */
      public java.lang.String getOwnerName() {
        java.lang.Object ref = ownerName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            ownerName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * optional string ownerName = 6;
       * @return The bytes for ownerName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getOwnerNameBytes() {
        java.lang.Object ref = ownerName_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          ownerName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * optional string ownerName = 6;
       * @param value The ownerName to set.
       * @return This builder for chaining.
       */
      public Builder setOwnerName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ownerName_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * optional string ownerName = 6;
       * @return This builder for chaining.
       */
      public Builder clearOwnerName() {
        ownerName_ = getDefaultInstance().getOwnerName();
        bitField0_ = (bitField0_ & ~0x00000020);
        onChanged();
        return this;
      }
      /**
       * optional string ownerName = 6;
       * @param value The bytes for ownerName to set.
       * @return This builder for chaining.
       */
      public Builder setOwnerNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ownerName_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }

      private java.lang.Object groupName_ = "";
      /**
       * optional string groupName = 7;
       * @return Whether the groupName field is set.
       */
      public boolean hasGroupName() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * optional string groupName = 7;
       * @return The groupName.
       */
      public java.lang.String getGroupName() {
        java.lang.Object ref = groupName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            groupName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * optional string groupName = 7;
       * @return The bytes for groupName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getGroupNameBytes() {
        java.lang.Object ref = groupName_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          groupName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * optional string groupName = 7;
       * @param value The groupName to set.
       * @return This builder for chaining.
       */
      public Builder setGroupName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        groupName_ = value;
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * optional string groupName = 7;
       * @return This builder for chaining.
       */
      public Builder clearGroupName() {
        groupName_ = getDefaultInstance().getGroupName();
        bitField0_ = (bitField0_ & ~0x00000040);
        onChanged();
        return this;
      }
      /**
       * optional string groupName = 7;
       * @param value The bytes for groupName to set.
       * @return This builder for chaining.
       */
      public Builder setGroupNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        groupName_ = value;
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto perms_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> permsBuilder_;
      /**
       * optional .hadoop.hdfs.FsPermissionProto perms = 8;
       * @return Whether the perms field is set.
       */
      public boolean hasPerms() {
        return ((bitField0_ & 0x00000080) != 0);
      }
      /**
       * optional .hadoop.hdfs.FsPermissionProto perms = 8;
       * @return The perms.
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPerms() {
        if (permsBuilder_ == null) {
          return perms_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : perms_;
        } else {
          return permsBuilder_.getMessage();
        }
      }
      /**
       * optional .hadoop.hdfs.FsPermissionProto perms = 8;
       */
      public Builder setPerms(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) {
        if (permsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          perms_ = value;
        } else {
          permsBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * optional .hadoop.hdfs.FsPermissionProto perms = 8;
       */
      public Builder setPerms(
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) {
        if (permsBuilder_ == null) {
          perms_ = builderForValue.build();
        } else {
          permsBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * optional .hadoop.hdfs.FsPermissionProto perms = 8;
       */
      public Builder mergePerms(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) {
        if (permsBuilder_ == null) {
          if (((bitField0_ & 0x00000080) != 0) &&
            perms_ != null &&
            perms_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) {
            getPermsBuilder().mergeFrom(value);
          } else {
            perms_ = value;
          }
        } else {
          permsBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * optional .hadoop.hdfs.FsPermissionProto perms = 8;
       */
      public Builder clearPerms() {
        bitField0_ = (bitField0_ & ~0x00000080);
        perms_ = null;
        if (permsBuilder_ != null) {
          permsBuilder_.dispose();
          permsBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * optional .hadoop.hdfs.FsPermissionProto perms = 8;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getPermsBuilder() {
        bitField0_ |= 0x00000080;
        onChanged();
        return getPermsFieldBuilder().getBuilder();
      }
      /**
       * optional .hadoop.hdfs.FsPermissionProto perms = 8;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermsOrBuilder() {
        if (permsBuilder_ != null) {
          return permsBuilder_.getMessageOrBuilder();
        } else {
          return perms_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : perms_;
        }
      }
      /**
       * optional .hadoop.hdfs.FsPermissionProto perms = 8;
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> 
          getPermsFieldBuilder() {
        if (permsBuilder_ == null) {
          permsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>(
                  getPerms(),
                  getParentForChildren(),
                  isClean());
          perms_ = null;
        }
        return permsBuilder_;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto> acls_ =
        java.util.Collections.emptyList();
      private void ensureAclsIsMutable() {
        if (!((bitField0_ & 0x00000100) != 0)) {
          acls_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto>(acls_);
          bitField0_ |= 0x00000100;
         }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProtoOrBuilder> aclsBuilder_;

      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto> getAclsList() {
        if (aclsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(acls_);
        } else {
          return aclsBuilder_.getMessageList();
        }
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public int getAclsCount() {
        if (aclsBuilder_ == null) {
          return acls_.size();
        } else {
          return aclsBuilder_.getCount();
        }
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto getAcls(int index) {
        if (aclsBuilder_ == null) {
          return acls_.get(index);
        } else {
          return aclsBuilder_.getMessage(index);
        }
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public Builder setAcls(
          int index, org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto value) {
        if (aclsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureAclsIsMutable();
          acls_.set(index, value);
          onChanged();
        } else {
          aclsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public Builder setAcls(
          int index, org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.Builder builderForValue) {
        if (aclsBuilder_ == null) {
          ensureAclsIsMutable();
          acls_.set(index, builderForValue.build());
          onChanged();
        } else {
          aclsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public Builder addAcls(org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto value) {
        if (aclsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureAclsIsMutable();
          acls_.add(value);
          onChanged();
        } else {
          aclsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public Builder addAcls(
          int index, org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto value) {
        if (aclsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureAclsIsMutable();
          acls_.add(index, value);
          onChanged();
        } else {
          aclsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public Builder addAcls(
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.Builder builderForValue) {
        if (aclsBuilder_ == null) {
          ensureAclsIsMutable();
          acls_.add(builderForValue.build());
          onChanged();
        } else {
          aclsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public Builder addAcls(
          int index, org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.Builder builderForValue) {
        if (aclsBuilder_ == null) {
          ensureAclsIsMutable();
          acls_.add(index, builderForValue.build());
          onChanged();
        } else {
          aclsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public Builder addAllAcls(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto> values) {
        if (aclsBuilder_ == null) {
          ensureAclsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, acls_);
          onChanged();
        } else {
          aclsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public Builder clearAcls() {
        if (aclsBuilder_ == null) {
          acls_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000100);
          onChanged();
        } else {
          aclsBuilder_.clear();
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public Builder removeAcls(int index) {
        if (aclsBuilder_ == null) {
          ensureAclsIsMutable();
          acls_.remove(index);
          onChanged();
        } else {
          aclsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.Builder getAclsBuilder(
          int index) {
        return getAclsFieldBuilder().getBuilder(index);
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProtoOrBuilder getAclsOrBuilder(
          int index) {
        if (aclsBuilder_ == null) {
          return acls_.get(index);  } else {
          return aclsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProtoOrBuilder> 
           getAclsOrBuilderList() {
        if (aclsBuilder_ != null) {
          return aclsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(acls_);
        }
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.Builder addAclsBuilder() {
        return getAclsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.getDefaultInstance());
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.Builder addAclsBuilder(
          int index) {
        return getAclsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.getDefaultInstance());
      }
      /**
       * repeated .hadoop.hdfs.AclEntryProto acls = 9;
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.Builder> 
           getAclsBuilderList() {
        return getAclsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProtoOrBuilder> 
          getAclsFieldBuilder() {
        if (aclsBuilder_ == null) {
          aclsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProtoOrBuilder>(
                  acls_,
                  ((bitField0_ & 0x00000100) != 0),
                  getParentForChildren(),
                  isClean());
          acls_ = null;
        }
        return aclsBuilder_;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto> xAttrs_ =
        java.util.Collections.emptyList();
      private void ensureXAttrsIsMutable() {
        if (!((bitField0_ & 0x00000200) != 0)) {
          xAttrs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto>(xAttrs_);
          bitField0_ |= 0x00000200;
         }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.Builder, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProtoOrBuilder> xAttrsBuilder_;

      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto> getXAttrsList() {
        if (xAttrsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(xAttrs_);
        } else {
          return xAttrsBuilder_.getMessageList();
        }
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public int getXAttrsCount() {
        if (xAttrsBuilder_ == null) {
          return xAttrs_.size();
        } else {
          return xAttrsBuilder_.getCount();
        }
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto getXAttrs(int index) {
        if (xAttrsBuilder_ == null) {
          return xAttrs_.get(index);
        } else {
          return xAttrsBuilder_.getMessage(index);
        }
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public Builder setXAttrs(
          int index, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto value) {
        if (xAttrsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureXAttrsIsMutable();
          xAttrs_.set(index, value);
          onChanged();
        } else {
          xAttrsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public Builder setXAttrs(
          int index, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.Builder builderForValue) {
        if (xAttrsBuilder_ == null) {
          ensureXAttrsIsMutable();
          xAttrs_.set(index, builderForValue.build());
          onChanged();
        } else {
          xAttrsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public Builder addXAttrs(org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto value) {
        if (xAttrsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureXAttrsIsMutable();
          xAttrs_.add(value);
          onChanged();
        } else {
          xAttrsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public Builder addXAttrs(
          int index, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto value) {
        if (xAttrsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureXAttrsIsMutable();
          xAttrs_.add(index, value);
          onChanged();
        } else {
          xAttrsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public Builder addXAttrs(
          org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.Builder builderForValue) {
        if (xAttrsBuilder_ == null) {
          ensureXAttrsIsMutable();
          xAttrs_.add(builderForValue.build());
          onChanged();
        } else {
          xAttrsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public Builder addXAttrs(
          int index, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.Builder builderForValue) {
        if (xAttrsBuilder_ == null) {
          ensureXAttrsIsMutable();
          xAttrs_.add(index, builderForValue.build());
          onChanged();
        } else {
          xAttrsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public Builder addAllXAttrs(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto> values) {
        if (xAttrsBuilder_ == null) {
          ensureXAttrsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, xAttrs_);
          onChanged();
        } else {
          xAttrsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public Builder clearXAttrs() {
        if (xAttrsBuilder_ == null) {
          xAttrs_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000200);
          onChanged();
        } else {
          xAttrsBuilder_.clear();
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public Builder removeXAttrs(int index) {
        if (xAttrsBuilder_ == null) {
          ensureXAttrsIsMutable();
          xAttrs_.remove(index);
          onChanged();
        } else {
          xAttrsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.Builder getXAttrsBuilder(
          int index) {
        return getXAttrsFieldBuilder().getBuilder(index);
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProtoOrBuilder getXAttrsOrBuilder(
          int index) {
        if (xAttrsBuilder_ == null) {
          return xAttrs_.get(index);  } else {
          return xAttrsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProtoOrBuilder> 
           getXAttrsOrBuilderList() {
        if (xAttrsBuilder_ != null) {
          return xAttrsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(xAttrs_);
        }
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.Builder addXAttrsBuilder() {
        return getXAttrsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.getDefaultInstance());
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.Builder addXAttrsBuilder(
          int index) {
        return getXAttrsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.getDefaultInstance());
      }
      /**
       * repeated .hadoop.hdfs.XAttrProto xAttrs = 10;
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.Builder> 
           getXAttrsBuilderList() {
        return getXAttrsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.Builder, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProtoOrBuilder> 
          getXAttrsFieldBuilder() {
        if (xAttrsBuilder_ == null) {
          xAttrsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.Builder, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProtoOrBuilder>(
                  xAttrs_,
                  ((bitField0_ & 0x00000200) != 0),
                  getParentForChildren(),
                  isClean());
          xAttrs_ = null;
        }
        return xAttrsBuilder_;
      }

      private boolean xAttrsRemoved_ ;
      /**
       * optional bool xAttrsRemoved = 11;
       * @return Whether the xAttrsRemoved field is set.
       */
      @java.lang.Override
      public boolean hasXAttrsRemoved() {
        return ((bitField0_ & 0x00000400) != 0);
      }
      /**
       * optional bool xAttrsRemoved = 11;
       * @return The xAttrsRemoved.
       */
      @java.lang.Override
      public boolean getXAttrsRemoved() {
        return xAttrsRemoved_;
      }
      /**
       * optional bool xAttrsRemoved = 11;
       * @param value The xAttrsRemoved to set.
       * @return This builder for chaining.
       */
      public Builder setXAttrsRemoved(boolean value) {
        
        xAttrsRemoved_ = value;
        bitField0_ |= 0x00000400;
        onChanged();
        return this;
      }
      /**
       * optional bool xAttrsRemoved = 11;
       * @return This builder for chaining.
       */
      public Builder clearXAttrsRemoved() {
        bitField0_ = (bitField0_ & ~0x00000400);
        xAttrsRemoved_ = false;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.MetadataUpdateEventProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.MetadataUpdateEventProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<MetadataUpdateEventProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<MetadataUpdateEventProto>() {
      @java.lang.Override
      public MetadataUpdateEventProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };
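
    /*
     * The static PARSER field is deprecated in the generated API; new code
     * should prefer the parser() accessor below, which returns the same
     * singleton instance.
     */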

    public static org.apache.hadoop.thirdparty.protobuf.Parser<MetadataUpdateEventProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<MetadataUpdateEventProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.MetadataUpdateEventProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
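
  /*
   * A short sketch of populating the repeated xAttrs field of the message
   * above. The XAttrProto shape assumed here (a required namespace enum
   * plus a required name string) comes from XAttrProtos, not from this
   * file, so treat those setters as assumptions:
   *
   *   MetadataUpdateEventProto e = MetadataUpdateEventProto.newBuilder()
   *       .setPath("/data/file")
   *       .setType(MetadataUpdateType.META_TYPE_XATTRS)
   *       .addXAttrs(org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.newBuilder()
   *           .setNamespace(org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.XAttrNamespaceProto.USER)
   *           .setName("user.checksum")
   *           .build())
   *       .setXAttrsRemoved(false)
   *       .build();
   */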

  public interface UnlinkEventProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.UnlinkEventProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * required string path = 1;
     * @return Whether the path field is set.
     */
    boolean hasPath();
    /**
     * required string path = 1;
     * @return The path.
     */
    java.lang.String getPath();
    /**
     * required string path = 1;
     * @return The bytes for path.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes();

    /**
     * required int64 timestamp = 2;
     * @return Whether the timestamp field is set.
     */
    boolean hasTimestamp();
    /**
     * required int64 timestamp = 2;
     * @return The timestamp.
     */
    long getTimestamp();
  }
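
  /*
   * Both fields of UnlinkEventProto (defined below) are required, so
   * build() on a partially-populated builder throws; a minimal sketch
   * using the standard generated setters:
   *
   *   UnlinkEventProto u = UnlinkEventProto.newBuilder()
   *       .setPath("/tmp/removed")
   *       .setTimestamp(System.currentTimeMillis())
   *       .build();
   */
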
  /**
   * Protobuf type {@code hadoop.hdfs.UnlinkEventProto}
   */
  public static final class UnlinkEventProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.UnlinkEventProto)
      UnlinkEventProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use UnlinkEventProto.newBuilder() to construct.
    private UnlinkEventProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private UnlinkEventProto() {
      path_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new UnlinkEventProto();
    }

    @java.lang.Override
    public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
    getUnknownFields() {
      return this.unknownFields;
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_UnlinkEventProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_UnlinkEventProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto.Builder.class);
    }

    private int bitField0_;
    public static final int PATH_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object path_ = "";
    /**
     * required string path = 1;
     * @return Whether the path field is set.
     */
    @java.lang.Override
    public boolean hasPath() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * required string path = 1;
     * @return The path.
     */
    @java.lang.Override
    public java.lang.String getPath() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          path_ = s;
        }
        return s;
      }
    }
    /**
     * required string path = 1;
     * @return The bytes for path.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        path_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int TIMESTAMP_FIELD_NUMBER = 2;
    private long timestamp_ = 0L;
    /**
     * required int64 timestamp = 2;
     * @return Whether the timestamp field is set.
     */
    @java.lang.Override
    public boolean hasTimestamp() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * required int64 timestamp = 2;
     * @return The timestamp.
     */
    @java.lang.Override
    public long getTimestamp() {
      return timestamp_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasPath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasTimestamp()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeInt64(2, timestamp_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(2, timestamp_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto other = (org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto) obj;

      if (hasPath() != other.hasPath()) return false;
      if (hasPath()) {
        if (!getPath()
            .equals(other.getPath())) return false;
      }
      if (hasTimestamp() != other.hasTimestamp()) return false;
      if (hasTimestamp()) {
        if (getTimestamp()
            != other.getTimestamp()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasPath()) {
        hash = (37 * hash) + PATH_FIELD_NUMBER;
        hash = (53 * hash) + getPath().hashCode();
      }
      if (hasTimestamp()) {
        hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getTimestamp());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
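    // ------------------------------------------------------------------
    // Editor's sketch (not part of the protoc output): how the delimited
    // variants above differ from plain parseFrom. parseFrom(InputStream)
    // consumes the stream to EOF as a single message, while
    // writeDelimitedTo/parseDelimitedFrom prefix each record with a varint
    // length so many records can share one stream; parseDelimitedFrom
    // returns null at EOF. The stream arguments here are hypothetical.
    @SuppressWarnings("unused")
    private static void delimitedStreamSketch(java.io.InputStream in,
        java.io.OutputStream out, UnlinkEventProto event)
        throws java.io.IOException {
      // Write one length-prefixed record.
      event.writeDelimitedTo(out);
      // Read records back until the stream is exhausted.
      UnlinkEventProto next;
      while ((next = parseDelimitedFrom(in)) != null) {
        System.out.println(next.getPath() + " @ " + next.getTimestamp());
      }
    }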

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.UnlinkEventProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.UnlinkEventProto)
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_UnlinkEventProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_UnlinkEventProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        path_ = "";
        timestamp_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_UnlinkEventProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto build() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto result = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.path_ = path_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.timestamp_ = timestamp_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto.getDefaultInstance()) return this;
        if (other.hasPath()) {
          path_ = other.path_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasTimestamp()) {
          setTimestamp(other.getTimestamp());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasPath()) {
          return false;
        }
        if (!hasTimestamp()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                path_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                timestamp_ = input.readInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
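      // Editor's note (not part of the protoc output): the case labels in
      // mergeFrom above are precomputed protobuf wire tags, where
      // tag = (field_number << 3) | wire_type. path is field 1 with wire
      // type 2 (length-delimited), so (1 << 3) | 2 = 10; timestamp is
      // field 2 with wire type 0 (varint), so (2 << 3) | 0 = 16.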
      private int bitField0_;

      private java.lang.Object path_ = "";
      /**
       * required string path = 1;
       * @return Whether the path field is set.
       */
      public boolean hasPath() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * required string path = 1;
       * @return The path.
       */
      public java.lang.String getPath() {
        java.lang.Object ref = path_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            path_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string path = 1;
       * @return The bytes for path.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getPathBytes() {
        java.lang.Object ref = path_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          path_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * required string path = 1;
       * @param value The path to set.
       * @return This builder for chaining.
       */
      public Builder setPath(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * required string path = 1;
       * @return This builder for chaining.
       */
      public Builder clearPath() {
        path_ = getDefaultInstance().getPath();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * required string path = 1;
       * @param value The bytes for path to set.
       * @return This builder for chaining.
       */
      public Builder setPathBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private long timestamp_ ;
      /**
       * required int64 timestamp = 2;
       * @return Whether the timestamp field is set.
       */
      @java.lang.Override
      public boolean hasTimestamp() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * required int64 timestamp = 2;
       * @return The timestamp.
       */
      @java.lang.Override
      public long getTimestamp() {
        return timestamp_;
      }
      /**
       * required int64 timestamp = 2;
       * @param value The timestamp to set.
       * @return This builder for chaining.
       */
      public Builder setTimestamp(long value) {
        timestamp_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * required int64 timestamp = 2;
       * @return This builder for chaining.
       */
      public Builder clearTimestamp() {
        bitField0_ = (bitField0_ & ~0x00000002);
        timestamp_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.UnlinkEventProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.UnlinkEventProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<UnlinkEventProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<UnlinkEventProto>() {
      @java.lang.Override
      public UnlinkEventProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<UnlinkEventProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<UnlinkEventProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.UnlinkEventProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
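
  // --------------------------------------------------------------------
  // Editor's sketch (not part of the protoc output): a minimal round trip
  // through the UnlinkEventProto API defined above. Both fields are
  // `required`, so build() throws if either setter is skipped; the path
  // and timestamp values below are hypothetical.
  // --------------------------------------------------------------------
  @SuppressWarnings("unused")
  private static void unlinkEventRoundTripSketch()
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    UnlinkEventProto event = UnlinkEventProto.newBuilder()
        .setPath("/tmp/example-file")      // hypothetical path
        .setTimestamp(1700000000000L)      // hypothetical epoch millis
        .build();
    byte[] wire = event.toByteArray();     // serialize to the wire format
    UnlinkEventProto parsed = UnlinkEventProto.parseFrom(wire);
    assert parsed.getPath().equals(event.getPath());
    assert parsed.getTimestamp() == event.getTimestamp();
  }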

  public interface EventsListProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.EventsListProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * 
     * deprecated
     * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ java.util.List getEventsList(); /** *
     * deprecated
     * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto getEvents(int index); /** *
     * deprecated
     * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ int getEventsCount(); /** *
     * deprecated
     * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ java.util.List getEventsOrBuilderList(); /** *
     * deprecated
     * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder getEventsOrBuilder( int index); /** * required int64 firstTxid = 2; * @return Whether the firstTxid field is set. */ boolean hasFirstTxid(); /** * required int64 firstTxid = 2; * @return The firstTxid. */ long getFirstTxid(); /** * required int64 lastTxid = 3; * @return Whether the lastTxid field is set. */ boolean hasLastTxid(); /** * required int64 lastTxid = 3; * @return The lastTxid. */ long getLastTxid(); /** * required int64 syncTxid = 4; * @return Whether the syncTxid field is set. */ boolean hasSyncTxid(); /** * required int64 syncTxid = 4; * @return The syncTxid. */ long getSyncTxid(); /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ java.util.List getBatchList(); /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto getBatch(int index); /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ int getBatchCount(); /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ java.util.List getBatchOrBuilderList(); /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProtoOrBuilder getBatchOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.EventsListProto} */ public static final class EventsListProto extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:hadoop.hdfs.EventsListProto) EventsListProtoOrBuilder { private static final long serialVersionUID = 0L; // Use EventsListProto.newBuilder() to construct. private EventsListProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private EventsListProto() { events_ = java.util.Collections.emptyList(); batch_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new EventsListProto(); } @java.lang.Override public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventsListProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventsListProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.Builder.class); } private int bitField0_; public static final int EVENTS_FIELD_NUMBER = 1; @SuppressWarnings("serial") private java.util.List events_; /** *
     * deprecated
     * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ @java.lang.Override public java.util.List getEventsList() { return events_; } /** *
     * deprecated
     * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ @java.lang.Override public java.util.List getEventsOrBuilderList() { return events_; } /** *
     * deprecated
     * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ @java.lang.Override public int getEventsCount() { return events_.size(); } /** *
     * deprecated
     * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto getEvents(int index) { return events_.get(index); } /** *
     * deprecated
     * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder getEventsOrBuilder( int index) { return events_.get(index); } public static final int FIRSTTXID_FIELD_NUMBER = 2; private long firstTxid_ = 0L; /** * required int64 firstTxid = 2; * @return Whether the firstTxid field is set. */ @java.lang.Override public boolean hasFirstTxid() { return ((bitField0_ & 0x00000001) != 0); } /** * required int64 firstTxid = 2; * @return The firstTxid. */ @java.lang.Override public long getFirstTxid() { return firstTxid_; } public static final int LASTTXID_FIELD_NUMBER = 3; private long lastTxid_ = 0L; /** * required int64 lastTxid = 3; * @return Whether the lastTxid field is set. */ @java.lang.Override public boolean hasLastTxid() { return ((bitField0_ & 0x00000002) != 0); } /** * required int64 lastTxid = 3; * @return The lastTxid. */ @java.lang.Override public long getLastTxid() { return lastTxid_; } public static final int SYNCTXID_FIELD_NUMBER = 4; private long syncTxid_ = 0L; /** * required int64 syncTxid = 4; * @return Whether the syncTxid field is set. */ @java.lang.Override public boolean hasSyncTxid() { return ((bitField0_ & 0x00000004) != 0); } /** * required int64 syncTxid = 4; * @return The syncTxid. */ @java.lang.Override public long getSyncTxid() { return syncTxid_; } public static final int BATCH_FIELD_NUMBER = 5; @SuppressWarnings("serial") private java.util.List batch_; /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ @java.lang.Override public java.util.List getBatchList() { return batch_; } /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ @java.lang.Override public java.util.List getBatchOrBuilderList() { return batch_; } /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ @java.lang.Override public int getBatchCount() { return batch_.size(); } /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto getBatch(int index) { return batch_.get(index); } /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProtoOrBuilder getBatchOrBuilder( int index) { return batch_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; if (!hasFirstTxid()) { memoizedIsInitialized = 0; return false; } if (!hasLastTxid()) { memoizedIsInitialized = 0; return false; } if (!hasSyncTxid()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getEventsCount(); i++) { if (!getEvents(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } for (int i = 0; i < getBatchCount(); i++) { if (!getBatch(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < events_.size(); i++) { output.writeMessage(1, events_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { output.writeInt64(2, firstTxid_); } if (((bitField0_ & 0x00000002) != 0)) { output.writeInt64(3, lastTxid_); } if (((bitField0_ & 0x00000004) != 0)) { output.writeInt64(4, syncTxid_); } for (int i = 0; i < batch_.size(); i++) { output.writeMessage(5, 
batch_.get(i)); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < events_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(1, events_.get(i)); } if (((bitField0_ & 0x00000001) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt64Size(2, firstTxid_); } if (((bitField0_ & 0x00000002) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt64Size(3, lastTxid_); } if (((bitField0_ & 0x00000004) != 0)) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeInt64Size(4, syncTxid_); } for (int i = 0; i < batch_.size(); i++) { size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream .computeMessageSize(5, batch_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto other = (org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto) obj; if (!getEventsList() .equals(other.getEventsList())) return false; if (hasFirstTxid() != other.hasFirstTxid()) return false; if (hasFirstTxid()) { if (getFirstTxid() != other.getFirstTxid()) return false; } if (hasLastTxid() != other.hasLastTxid()) return false; if (hasLastTxid()) { if (getLastTxid() != other.getLastTxid()) return false; } if (hasSyncTxid() != other.hasSyncTxid()) return false; if (hasSyncTxid()) { if (getSyncTxid() != other.getSyncTxid()) return false; } if (!getBatchList() .equals(other.getBatchList())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getEventsCount() > 0) { hash = (37 * hash) + EVENTS_FIELD_NUMBER; hash = (53 * hash) + getEventsList().hashCode(); } if (hasFirstTxid()) { hash = (37 * hash) + FIRSTTXID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getFirstTxid()); } if (hasLastTxid()) { hash = (37 * hash) + LASTTXID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getLastTxid()); } if (hasSyncTxid()) { hash = (37 * hash) + SYNCTXID_FIELD_NUMBER; hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong( getSyncTxid()); } if (getBatchCount() > 0) { hash = (37 * hash) + BATCH_FIELD_NUMBER; hash = (53 * hash) + getBatchList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto parseFrom( java.nio.ByteBuffer data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto parseFrom( java.nio.ByteBuffer data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto parseFrom( org.apache.hadoop.thirdparty.protobuf.ByteString data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto parseFrom(byte[] data) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto parseFrom( byte[] data, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto parseFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto parseFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.EventsListProto} */ public static final class Builder extends org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:hadoop.hdfs.EventsListProto) org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProtoOrBuilder { public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventsListProto_descriptor; } @java.lang.Override protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventsListProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.class, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.newBuilder() private Builder() { } private Builder( org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; if (eventsBuilder_ == null) { events_ = java.util.Collections.emptyList(); } else { events_ = null; eventsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); firstTxid_ = 0L; lastTxid_ = 0L; syncTxid_ = 0L; if (batchBuilder_ == null) { batch_ = java.util.Collections.emptyList(); } else { batch_ = null; batchBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } @java.lang.Override public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.internal_static_hadoop_hdfs_EventsListProto_descriptor; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.getDefaultInstance(); } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto build() { org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto result = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto(this); buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto result) { if (eventsBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { events_ = java.util.Collections.unmodifiableList(events_); bitField0_ = (bitField0_ & ~0x00000001); } result.events_ = events_; } else { result.events_ = eventsBuilder_.build(); } if (batchBuilder_ == null) { if (((bitField0_ & 0x00000010) != 0)) { batch_ = 
java.util.Collections.unmodifiableList(batch_); bitField0_ = (bitField0_ & ~0x00000010); } result.batch_ = batch_; } else { result.batch_ = batchBuilder_.build(); } } private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000002) != 0)) { result.firstTxid_ = firstTxid_; to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000004) != 0)) { result.lastTxid_ = lastTxid_; to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000008) != 0)) { result.syncTxid_ = syncTxid_; to_bitField0_ |= 0x00000004; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof( org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.getDefaultInstance()) return this; if (eventsBuilder_ == null) { if (!other.events_.isEmpty()) { if (events_.isEmpty()) { events_ = other.events_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureEventsIsMutable(); events_.addAll(other.events_); } onChanged(); } } else { if (!other.events_.isEmpty()) { if (eventsBuilder_.isEmpty()) { eventsBuilder_.dispose(); eventsBuilder_ = null; events_ = other.events_; bitField0_ = (bitField0_ & ~0x00000001); eventsBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getEventsFieldBuilder() : null; } else { eventsBuilder_.addAllMessages(other.events_); } } } if (other.hasFirstTxid()) { setFirstTxid(other.getFirstTxid()); } if (other.hasLastTxid()) { setLastTxid(other.getLastTxid()); } if (other.hasSyncTxid()) { setSyncTxid(other.getSyncTxid()); } if (batchBuilder_ == null) { if (!other.batch_.isEmpty()) { if (batch_.isEmpty()) { batch_ = other.batch_; bitField0_ = (bitField0_ & ~0x00000010); } else { ensureBatchIsMutable(); batch_.addAll(other.batch_); } onChanged(); } } else { if (!other.batch_.isEmpty()) { if (batchBuilder_.isEmpty()) { batchBuilder_.dispose(); batchBuilder_ = null; batch_ = other.batch_; bitField0_ = (bitField0_ & ~0x00000010); batchBuilder_ = org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getBatchFieldBuilder() : null; } else { batchBuilder_.addAllMessages(other.batch_); } } } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { if (!hasFirstTxid()) { return false; } if (!hasLastTxid()) { return false; } if (!hasSyncTxid()) { return false; } for (int i = 0; i < getEventsCount(); i++) { if (!getEvents(i).isInitialized()) { return false; } } for (int i = 0; i < getBatchCount(); i++) { if (!getBatch(i).isInitialized()) { return false; } } return true; } @java.lang.Override public Builder mergeFrom( org.apache.hadoop.thirdparty.protobuf.CodedInputStream input, org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.PARSER, extensionRegistry); if (eventsBuilder_ == null) { ensureEventsIsMutable(); events_.add(m); } else { eventsBuilder_.addMessage(m); } break; } // case 10 case 16: { firstTxid_ = input.readInt64(); bitField0_ |= 0x00000002; break; } // case 16 case 24: { lastTxid_ = input.readInt64(); bitField0_ |= 0x00000004; break; } // case 24 case 32: { syncTxid_ = input.readInt64(); bitField0_ |= 0x00000008; break; } // case 32 case 42: { org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto m = input.readMessage( org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.PARSER, extensionRegistry); if (batchBuilder_ == null) { ensureBatchIsMutable(); batch_.add(m); } else { batchBuilder_.addMessage(m); } break; } // case 42 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.util.List events_ = java.util.Collections.emptyList(); private void ensureEventsIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { events_ = new java.util.ArrayList(events_); bitField0_ |= 0x00000001; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder> eventsBuilder_; /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public java.util.List getEventsList() { if (eventsBuilder_ == null) { return java.util.Collections.unmodifiableList(events_); } else { return eventsBuilder_.getMessageList(); } } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public int getEventsCount() { if (eventsBuilder_ == null) { return events_.size(); } else { return eventsBuilder_.getCount(); } } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto getEvents(int index) { if (eventsBuilder_ == null) { return events_.get(index); } else { return eventsBuilder_.getMessage(index); } } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public Builder setEvents( int index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto value) { if (eventsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureEventsIsMutable(); events_.set(index, value); onChanged(); } else { eventsBuilder_.setMessage(index, value); } return this; } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public Builder setEvents( int index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder builderForValue) { if (eventsBuilder_ == null) { ensureEventsIsMutable(); events_.set(index, builderForValue.build()); onChanged(); } else { eventsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public Builder addEvents(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto value) { if (eventsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureEventsIsMutable(); events_.add(value); onChanged(); } else { eventsBuilder_.addMessage(value); } return this; } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public Builder addEvents( int index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto value) { if (eventsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureEventsIsMutable(); events_.add(index, value); onChanged(); } else { eventsBuilder_.addMessage(index, value); } return this; } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public Builder addEvents( org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder builderForValue) { if (eventsBuilder_ == null) { ensureEventsIsMutable(); events_.add(builderForValue.build()); onChanged(); } else { eventsBuilder_.addMessage(builderForValue.build()); } return this; } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public Builder addEvents( int index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder builderForValue) { if (eventsBuilder_ == null) { ensureEventsIsMutable(); events_.add(index, builderForValue.build()); onChanged(); } else { eventsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public Builder addAllEvents( java.lang.Iterable values) { if (eventsBuilder_ == null) { ensureEventsIsMutable(); org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll( values, events_); onChanged(); } else { eventsBuilder_.addAllMessages(values); } return this; } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public Builder clearEvents() { if (eventsBuilder_ == null) { events_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { eventsBuilder_.clear(); } return this; } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public Builder removeEvents(int index) { if (eventsBuilder_ == null) { ensureEventsIsMutable(); events_.remove(index); onChanged(); } else { eventsBuilder_.remove(index); } return this; } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder getEventsBuilder( int index) { return getEventsFieldBuilder().getBuilder(index); } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder getEventsOrBuilder( int index) { if (eventsBuilder_ == null) { return events_.get(index); } else { return eventsBuilder_.getMessageOrBuilder(index); } } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public java.util.List getEventsOrBuilderList() { if (eventsBuilder_ != null) { return eventsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(events_); } } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder addEventsBuilder() { return getEventsFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.getDefaultInstance()); } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder addEventsBuilder( int index) { return getEventsFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.getDefaultInstance()); } /** *
       * deprecated
       * 
* * repeated .hadoop.hdfs.EventProto events = 1; */ public java.util.List getEventsBuilderList() { return getEventsFieldBuilder().getBuilderList(); } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder> getEventsFieldBuilder() { if (eventsBuilder_ == null) { eventsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto.Builder, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProtoOrBuilder>( events_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); events_ = null; } return eventsBuilder_; } private long firstTxid_ ; /** * required int64 firstTxid = 2; * @return Whether the firstTxid field is set. */ @java.lang.Override public boolean hasFirstTxid() { return ((bitField0_ & 0x00000002) != 0); } /** * required int64 firstTxid = 2; * @return The firstTxid. */ @java.lang.Override public long getFirstTxid() { return firstTxid_; } /** * required int64 firstTxid = 2; * @param value The firstTxid to set. * @return This builder for chaining. */ public Builder setFirstTxid(long value) { firstTxid_ = value; bitField0_ |= 0x00000002; onChanged(); return this; } /** * required int64 firstTxid = 2; * @return This builder for chaining. */ public Builder clearFirstTxid() { bitField0_ = (bitField0_ & ~0x00000002); firstTxid_ = 0L; onChanged(); return this; } private long lastTxid_ ; /** * required int64 lastTxid = 3; * @return Whether the lastTxid field is set. */ @java.lang.Override public boolean hasLastTxid() { return ((bitField0_ & 0x00000004) != 0); } /** * required int64 lastTxid = 3; * @return The lastTxid. */ @java.lang.Override public long getLastTxid() { return lastTxid_; } /** * required int64 lastTxid = 3; * @param value The lastTxid to set. * @return This builder for chaining. */ public Builder setLastTxid(long value) { lastTxid_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * required int64 lastTxid = 3; * @return This builder for chaining. */ public Builder clearLastTxid() { bitField0_ = (bitField0_ & ~0x00000004); lastTxid_ = 0L; onChanged(); return this; } private long syncTxid_ ; /** * required int64 syncTxid = 4; * @return Whether the syncTxid field is set. */ @java.lang.Override public boolean hasSyncTxid() { return ((bitField0_ & 0x00000008) != 0); } /** * required int64 syncTxid = 4; * @return The syncTxid. */ @java.lang.Override public long getSyncTxid() { return syncTxid_; } /** * required int64 syncTxid = 4; * @param value The syncTxid to set. * @return This builder for chaining. */ public Builder setSyncTxid(long value) { syncTxid_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * required int64 syncTxid = 4; * @return This builder for chaining. 
*/ public Builder clearSyncTxid() { bitField0_ = (bitField0_ & ~0x00000008); syncTxid_ = 0L; onChanged(); return this; } private java.util.List batch_ = java.util.Collections.emptyList(); private void ensureBatchIsMutable() { if (!((bitField0_ & 0x00000010) != 0)) { batch_ = new java.util.ArrayList(batch_); bitField0_ |= 0x00000010; } } private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.Builder, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProtoOrBuilder> batchBuilder_; /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ public java.util.List getBatchList() { if (batchBuilder_ == null) { return java.util.Collections.unmodifiableList(batch_); } else { return batchBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ public int getBatchCount() { if (batchBuilder_ == null) { return batch_.size(); } else { return batchBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto getBatch(int index) { if (batchBuilder_ == null) { return batch_.get(index); } else { return batchBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ public Builder setBatch( int index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto value) { if (batchBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBatchIsMutable(); batch_.set(index, value); onChanged(); } else { batchBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ public Builder setBatch( int index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.Builder builderForValue) { if (batchBuilder_ == null) { ensureBatchIsMutable(); batch_.set(index, builderForValue.build()); onChanged(); } else { batchBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ public Builder addBatch(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto value) { if (batchBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBatchIsMutable(); batch_.add(value); onChanged(); } else { batchBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ public Builder addBatch( int index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto value) { if (batchBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBatchIsMutable(); batch_.add(index, value); onChanged(); } else { batchBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ public Builder addBatch( org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.Builder builderForValue) { if (batchBuilder_ == null) { ensureBatchIsMutable(); batch_.add(builderForValue.build()); onChanged(); } else { batchBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.EventBatchProto batch = 5; */ public Builder addBatch( int index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.Builder builderForValue) { if (batchBuilder_ == null) { ensureBatchIsMutable(); batch_.add(index, builderForValue.build()); onChanged(); } else { batchBuilder_.addMessage(index, 
      /**
       * repeated .hadoop.hdfs.EventBatchProto batch = 5;
       */
      public Builder addAllBatch(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto> values) {
        if (batchBuilder_ == null) {
          ensureBatchIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, batch_);
          onChanged();
        } else {
          batchBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.EventBatchProto batch = 5;
       */
      public Builder clearBatch() {
        if (batchBuilder_ == null) {
          batch_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000010);
          onChanged();
        } else {
          batchBuilder_.clear();
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.EventBatchProto batch = 5;
       */
      public Builder removeBatch(int index) {
        if (batchBuilder_ == null) {
          ensureBatchIsMutable();
          batch_.remove(index);
          onChanged();
        } else {
          batchBuilder_.remove(index);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.EventBatchProto batch = 5;
       */
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.Builder getBatchBuilder(
          int index) {
        return getBatchFieldBuilder().getBuilder(index);
      }
      /**
       * repeated .hadoop.hdfs.EventBatchProto batch = 5;
       */
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProtoOrBuilder getBatchOrBuilder(
          int index) {
        if (batchBuilder_ == null) {
          return batch_.get(index);
        } else {
          return batchBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * repeated .hadoop.hdfs.EventBatchProto batch = 5;
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProtoOrBuilder>
           getBatchOrBuilderList() {
        if (batchBuilder_ != null) {
          return batchBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(batch_);
        }
      }
      /**
       * repeated .hadoop.hdfs.EventBatchProto batch = 5;
       */
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.Builder addBatchBuilder() {
        return getBatchFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.getDefaultInstance());
      }
      /**
       * repeated .hadoop.hdfs.EventBatchProto batch = 5;
       */
      public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.Builder addBatchBuilder(
          int index) {
        return getBatchFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.getDefaultInstance());
      }
      /**
       * repeated .hadoop.hdfs.EventBatchProto batch = 5;
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.Builder>
           getBatchBuilderList() {
        return getBatchFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.Builder, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProtoOrBuilder>
          getBatchFieldBuilder() {
        if (batchBuilder_ == null) {
          batchBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto.Builder, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProtoOrBuilder>(
                  batch_,
                  ((bitField0_ & 0x00000010) != 0),
                  getParentForChildren(),
                  isClean());
          batch_ = null;
        }
        return batchBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }
      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.EventsListProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.EventsListProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<EventsListProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<EventsListProto>() {
      @java.lang.Override
      public EventsListProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<EventsListProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<EventsListProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_EventProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_EventProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_EventBatchProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_EventBatchProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_CreateEventProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_CreateEventProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_CloseEventProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_CloseEventProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_TruncateEventProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_TruncateEventProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_AppendEventProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_AppendEventProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_RenameEventProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_RenameEventProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_MetadataUpdateEventProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_MetadataUpdateEventProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_UnlinkEventProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_UnlinkEventProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_EventsListProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_EventsListProto_fieldAccessorTable;

  public static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      descriptor;
  static {
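    // Note (editorial, not in the generated source): descriptorData below is
    // the serialized FileDescriptorProto for inotify.proto, embedded as
    // octal-escaped string literals; it is decoded during class
    // initialization to rebuild the message and enum descriptors.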
"\032.hadoop.hdfs.AclEntryProto\022\'\n\006xAttrs\030\n " + "\003(\0132\027.hadoop.hdfs.XAttrProto\022\025\n\rxAttrsRe" + "moved\030\013 \001(\010\"3\n\020UnlinkEventProto\022\014\n\004path\030" + "\001 \002(\t\022\021\n\ttimestamp\030\002 \002(\003\"\236\001\n\017EventsListP" + "roto\022\'\n\006events\030\001 \003(\0132\027.hadoop.hdfs.Event" + "Proto\022\021\n\tfirstTxid\030\002 \002(\003\022\020\n\010lastTxid\030\003 \002" + "(\003\022\020\n\010syncTxid\030\004 \002(\003\022+\n\005batch\030\005 \003(\0132\034.ha" + "doop.hdfs.EventBatchProto*\214\001\n\tEventType\022" + "\020\n\014EVENT_CREATE\020\000\022\017\n\013EVENT_CLOSE\020\001\022\020\n\014EV" + "ENT_APPEND\020\002\022\020\n\014EVENT_RENAME\020\003\022\022\n\016EVENT_" + "METADATA\020\004\022\020\n\014EVENT_UNLINK\020\005\022\022\n\016EVENT_TR" + "UNCATE\020\006*F\n\tINodeType\022\017\n\013I_TYPE_FILE\020\000\022\024" + "\n\020I_TYPE_DIRECTORY\020\001\022\022\n\016I_TYPE_SYMLINK\020\002" + "*\230\001\n\022MetadataUpdateType\022\023\n\017META_TYPE_TIM" + "ES\020\000\022\031\n\025META_TYPE_REPLICATION\020\001\022\023\n\017META_" + "TYPE_OWNER\020\002\022\023\n\017META_TYPE_PERMS\020\003\022\022\n\016MET" + "A_TYPE_ACLS\020\004\022\024\n\020META_TYPE_XATTRS\020\005B9\n%o" + "rg.apache.hadoop.hdfs.protocol.protoB\rIn" + "otifyProtos\240\001\001" }; descriptor = org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor(), org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.getDescriptor(), }); internal_static_hadoop_hdfs_EventProto_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_hadoop_hdfs_EventProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_EventProto_descriptor, new java.lang.String[] { "Type", "Contents", }); internal_static_hadoop_hdfs_EventBatchProto_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_hadoop_hdfs_EventBatchProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_EventBatchProto_descriptor, new java.lang.String[] { "Txid", "Events", }); internal_static_hadoop_hdfs_CreateEventProto_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_hadoop_hdfs_CreateEventProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_CreateEventProto_descriptor, new java.lang.String[] { "Type", "Path", "Ctime", "OwnerName", "GroupName", "Perms", "Replication", "SymlinkTarget", "Overwrite", "DefaultBlockSize", "ErasureCoded", }); internal_static_hadoop_hdfs_CloseEventProto_descriptor = getDescriptor().getMessageTypes().get(3); internal_static_hadoop_hdfs_CloseEventProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_CloseEventProto_descriptor, new java.lang.String[] { "Path", "FileSize", "Timestamp", }); internal_static_hadoop_hdfs_TruncateEventProto_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_hadoop_hdfs_TruncateEventProto_fieldAccessorTable = new org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hadoop_hdfs_TruncateEventProto_descriptor, new java.lang.String[] { "Path", "FileSize", "Timestamp", }); 
    internal_static_hadoop_hdfs_AppendEventProto_descriptor =
      getDescriptor().getMessageTypes().get(5);
    internal_static_hadoop_hdfs_AppendEventProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_AppendEventProto_descriptor,
        new java.lang.String[] { "Path", "NewBlock", });
    internal_static_hadoop_hdfs_RenameEventProto_descriptor =
      getDescriptor().getMessageTypes().get(6);
    internal_static_hadoop_hdfs_RenameEventProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_RenameEventProto_descriptor,
        new java.lang.String[] { "SrcPath", "DestPath", "Timestamp", });
    internal_static_hadoop_hdfs_MetadataUpdateEventProto_descriptor =
      getDescriptor().getMessageTypes().get(7);
    internal_static_hadoop_hdfs_MetadataUpdateEventProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_MetadataUpdateEventProto_descriptor,
        new java.lang.String[] { "Path", "Type", "Mtime", "Atime", "Replication", "OwnerName", "GroupName", "Perms", "Acls", "XAttrs", "XAttrsRemoved", });
    internal_static_hadoop_hdfs_UnlinkEventProto_descriptor =
      getDescriptor().getMessageTypes().get(8);
    internal_static_hadoop_hdfs_UnlinkEventProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_UnlinkEventProto_descriptor,
        new java.lang.String[] { "Path", "Timestamp", });
    internal_static_hadoop_hdfs_EventsListProto_descriptor =
      getDescriptor().getMessageTypes().get(9);
    internal_static_hadoop_hdfs_EventsListProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_EventsListProto_descriptor,
        new java.lang.String[] { "Events", "FirstTxid", "LastTxid", "SyncTxid", "Batch", });
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor();
    org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.getDescriptor();
  }

  // @@protoc_insertion_point(outer_class_scope)
}
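For orientation, here is a minimal usage sketch (not part of the generated file): it shows the EventsListProto schema implied by the accessors and descriptor data above, then round-trips a message through the generated builder and parser. The class name and txid values are illustrative only, and the sketch assumes the Hadoop artifact containing this generated class (and its shaded protobuf runtime) is on the classpath.

// Usage sketch (illustrative, not part of the generated source).
// Schema reconstructed from the accessors and descriptor data above:
//
//   message EventsListProto {
//     repeated EventProto      events    = 1;
//     required int64           firstTxid = 2;
//     required int64           lastTxid  = 3;
//     required int64           syncTxid  = 4;
//     repeated EventBatchProto batch     = 5;
//   }
import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventBatchProto;
import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto;

public class EventsListProtoRoundTrip {  // hypothetical example class
  public static void main(String[] args) throws Exception {
    // Build a list holding one (empty) event batch. The required fields
    // (firstTxid, lastTxid, syncTxid, and the batch's txid) must be set,
    // or build() throws an UninitializedMessageException.
    EventsListProto list = EventsListProto.newBuilder()
        .setFirstTxid(100L)
        .setLastTxid(105L)
        .setSyncTxid(105L)
        .addBatch(EventBatchProto.newBuilder()
            .setTxid(100L)
            .build())
        .build();

    // Round-trip through the wire format using the generated parser.
    byte[] bytes = list.toByteArray();
    EventsListProto parsed = EventsListProto.parseFrom(bytes);
    System.out.println(parsed.getBatchCount());  // prints 1
  }
}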



