org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: AliasMapProtocol.proto

package org.apache.hadoop.hdfs.protocol.proto;

public final class AliasMapProtocolProtos {
  private AliasMapProtocolProtos() {}
  public static void registerAllExtensions(
      io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistry registry) {
  }
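  // Editorial note (not compiler-generated): the classes below carry the HDFS
  // alias map RPC messages (KeyValueProto, WriteRequestProto,
  // WriteResponseProto). The protobuf runtime they reference is relocated
  // under io.prestosql.hadoop.$internal, so callers must compile against that
  // shaded com.google.protobuf package rather than the stock one.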
  public interface KeyValueProtoOrBuilder
      extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {

    // optional .hadoop.hdfs.BlockProto key = 1;
    /**
     * optional .hadoop.hdfs.BlockProto key = 1;
     */
    boolean hasKey();
    /**
     * optional .hadoop.hdfs.BlockProto key = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getKey();
    /**
     * optional .hadoop.hdfs.BlockProto key = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getKeyOrBuilder();

    // optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
    /**
     * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
     */
    boolean hasValue();
    /**
     * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getValue();
    /**
     * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder getValueOrBuilder();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.KeyValueProto}
   */
  public static final class KeyValueProto extends
      io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
      implements KeyValueProtoOrBuilder {
    // Use KeyValueProto.newBuilder() to construct.
    private KeyValueProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private KeyValueProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final KeyValueProto defaultInstance;
    public static KeyValueProto getDefaultInstance() {
      return defaultInstance;
    }

    public KeyValueProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private KeyValueProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = key_.toBuilder();
              }
              key_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(key_);
                key_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 18: {
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000002) == 0x00000002)) {
                subBuilder = value_.toBuilder();
              }
              value_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(value_);
                value_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000002;
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_KeyValueProto_descriptor;
    }

    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_KeyValueProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<KeyValueProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<KeyValueProto>() {
      public KeyValueProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new KeyValueProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<KeyValueProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // optional .hadoop.hdfs.BlockProto key = 1;
    public static final int KEY_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto key_;
    /**
     * optional .hadoop.hdfs.BlockProto key = 1;
     */
    public boolean hasKey() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * optional .hadoop.hdfs.BlockProto key = 1;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getKey() {
      return key_;
    }
    /**
     * optional .hadoop.hdfs.BlockProto key = 1;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getKeyOrBuilder() {
      return key_;
    }

    // optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
    public static final int VALUE_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto value_;
    /**
     * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
     */
    public boolean hasValue() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getValue() {
      return value_;
    }
    /**
     * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder getValueOrBuilder() {
      return value_;
    }

    private void initFields() {
      key_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
      value_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (hasKey()) {
        if (!getKey().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasValue()) {
        if (!getValue().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, key_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, value_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, key_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, value_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto other = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto) obj;

      boolean result = true;
      result = result && (hasKey() == other.hasKey());
      if (hasKey()) {
        result = result && getKey()
            .equals(other.getKey());
      }
      result = result && (hasValue() == other.hasValue());
      if (hasValue()) {
        result = result && getValue()
            .equals(other.getValue());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasKey()) {
        hash = (37 * hash) + KEY_FIELD_NUMBER;
        hash = (53 * hash) + getKey().hashCode();
      }
      if (hasValue()) {
        hash = (37 * hash) + VALUE_FIELD_NUMBER;
        hash = (53 * hash) + getValue().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto parseFrom(byte[] data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto parseFrom(
        byte[] data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto parseFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto parseDelimitedFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.KeyValueProto}
     */
    public static final class Builder extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder {
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_KeyValueProto_descriptor;
      }

      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_KeyValueProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getKeyFieldBuilder();
          getValueFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        if (keyBuilder_ == null) {
          key_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
        } else {
          keyBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        if (valueBuilder_ == null) {
          value_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance();
        } else {
          valueBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_KeyValueProto_descriptor;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto build() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto result = new org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (keyBuilder_ == null) {
          result.key_ = key_;
        } else {
          result.key_ = keyBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        if (valueBuilder_ == null) {
          result.value_ = value_;
        } else {
          result.value_ = valueBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.getDefaultInstance()) return this;
        if (other.hasKey()) {
          mergeKey(other.getKey());
        }
        if (other.hasValue()) {
          mergeValue(other.getValue());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (hasKey()) {
          if (!getKey().isInitialized()) {
            
            return false;
          }
        }
        if (hasValue()) {
          if (!getValue().isInitialized()) {
            
            return false;
          }
        }
        return true;
      }

      public Builder mergeFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // optional .hadoop.hdfs.BlockProto key = 1;
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto key_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> keyBuilder_;
      /**
       * optional .hadoop.hdfs.BlockProto key = 1;
       */
      public boolean hasKey() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * optional .hadoop.hdfs.BlockProto key = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getKey() {
        if (keyBuilder_ == null) {
          return key_;
        } else {
          return keyBuilder_.getMessage();
        }
      }
      /**
       * optional .hadoop.hdfs.BlockProto key = 1;
       */
      public Builder setKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (keyBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          key_ = value;
          onChanged();
        } else {
          keyBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * optional .hadoop.hdfs.BlockProto key = 1;
       */
      public Builder setKey(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
        if (keyBuilder_ == null) {
          key_ = builderForValue.build();
          onChanged();
        } else {
          keyBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * optional .hadoop.hdfs.BlockProto key = 1;
       */
      public Builder mergeKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (keyBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              key_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
            key_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(key_).mergeFrom(value).buildPartial();
          } else {
            key_ = value;
          }
          onChanged();
        } else {
          keyBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * optional .hadoop.hdfs.BlockProto key = 1;
       */
      public Builder clearKey() {
        if (keyBuilder_ == null) {
          key_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
          onChanged();
        } else {
          keyBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * optional .hadoop.hdfs.BlockProto key = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getKeyBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getKeyFieldBuilder().getBuilder();
      }
      /**
       * optional .hadoop.hdfs.BlockProto key = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getKeyOrBuilder() {
        if (keyBuilder_ != null) {
          return keyBuilder_.getMessageOrBuilder();
        } else {
          return key_;
        }
      }
      /**
       * optional .hadoop.hdfs.BlockProto key = 1;
       */
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
          getKeyFieldBuilder() {
        if (keyBuilder_ == null) {
          keyBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
                  key_,
                  getParentForChildren(),
                  isClean());
          key_ = null;
        }
        return keyBuilder_;
      }

      // optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto value_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance();
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder> valueBuilder_;
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
       */
      public boolean hasValue() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getValue() {
        if (valueBuilder_ == null) {
          return value_;
        } else {
          return valueBuilder_.getMessage();
        }
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
       */
      public Builder setValue(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto value) {
        if (valueBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          value_ = value;
          onChanged();
        } else {
          valueBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
       */
      public Builder setValue(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder builderForValue) {
        if (valueBuilder_ == null) {
          value_ = builderForValue.build();
          onChanged();
        } else {
          valueBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
       */
      public Builder mergeValue(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto value) {
        if (valueBuilder_ == null) {
          if (((bitField0_ & 0x00000002) == 0x00000002) &&
              value_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance()) {
            value_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.newBuilder(value_).mergeFrom(value).buildPartial();
          } else {
            value_ = value;
          }
          onChanged();
        } else {
          valueBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
       */
      public Builder clearValue() {
        if (valueBuilder_ == null) {
          value_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance();
          onChanged();
        } else {
          valueBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder getValueBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getValueFieldBuilder().getBuilder();
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder getValueOrBuilder() {
        if (valueBuilder_ != null) {
          return valueBuilder_.getMessageOrBuilder();
        } else {
          return value_;
        }
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 2;
       */
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder> 
          getValueFieldBuilder() {
        if (valueBuilder_ == null) {
          valueBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder>(
                  value_,
                  getParentForChildren(),
                  isClean());
          value_ = null;
        }
        return valueBuilder_;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.KeyValueProto)
    }

    static {
      defaultInstance = new KeyValueProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.KeyValueProto)
  }
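  /*
   * Usage sketch (editorial, not compiler-generated): building and
   * round-tripping a KeyValueProto with the API defined above. The
   * BlockProto setters (setBlockId, setGenStamp) are assumed from
   * hdfs.proto's required fields; toByteArray() is the standard
   * protobuf MessageLite serializer.
   *
   *   HdfsProtos.BlockProto block = HdfsProtos.BlockProto.newBuilder()
   *       .setBlockId(42L)          // required uint64 blockId
   *       .setGenStamp(1L)          // required uint64 genStamp
   *       .build();
   *   KeyValueProto kv = KeyValueProto.newBuilder()
   *       .setKey(block)            // optional field 1
   *       .build();
   *   byte[] bytes = kv.toByteArray();
   *   KeyValueProto parsed = KeyValueProto.parseFrom(bytes);
   *   assert parsed.hasKey() && parsed.getKey().getBlockId() == 42L;
   */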

  public interface WriteRequestProtoOrBuilder
      extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
    /**
     * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
     */
    boolean hasKeyValuePair();
    /**
     * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto getKeyValuePair();
    /**
     * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder getKeyValuePairOrBuilder();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.WriteRequestProto}
   */
  public static final class WriteRequestProto extends
      io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
      implements WriteRequestProtoOrBuilder {
    // Use WriteRequestProto.newBuilder() to construct.
    private WriteRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private WriteRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final WriteRequestProto defaultInstance;
    public static WriteRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public WriteRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private WriteRequestProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = keyValuePair_.toBuilder();
              }
              keyValuePair_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(keyValuePair_);
                keyValuePair_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_WriteRequestProto_descriptor;
    }

    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_WriteRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<WriteRequestProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<WriteRequestProto>() {
      public WriteRequestProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new WriteRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<WriteRequestProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
    public static final int KEYVALUEPAIR_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto keyValuePair_;
    /**
     * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
     */
    public boolean hasKeyValuePair() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
     */
    public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto getKeyValuePair() {
      return keyValuePair_;
    }
    /**
     * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
     */
    public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder getKeyValuePairOrBuilder() {
      return keyValuePair_;
    }

    private void initFields() {
      keyValuePair_ = org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.getDefaultInstance();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasKeyValuePair()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getKeyValuePair().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, keyValuePair_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, keyValuePair_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto) obj;

      boolean result = true;
      result = result && (hasKeyValuePair() == other.hasKeyValuePair());
      if (hasKeyValuePair()) {
        result = result && getKeyValuePair()
            .equals(other.getKeyValuePair());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasKeyValuePair()) {
        hash = (37 * hash) + KEYVALUEPAIR_FIELD_NUMBER;
        hash = (53 * hash) + getKeyValuePair().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto parseFrom(byte[] data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto parseFrom(
        byte[] data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto parseFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.WriteRequestProto}
     */
    public static final class Builder extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProtoOrBuilder {
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_WriteRequestProto_descriptor;
      }

      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_WriteRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getKeyValuePairFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        if (keyValuePairBuilder_ == null) {
          keyValuePair_ = org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.getDefaultInstance();
        } else {
          keyValuePairBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_WriteRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (keyValuePairBuilder_ == null) {
          result.keyValuePair_ = keyValuePair_;
        } else {
          result.keyValuePair_ = keyValuePairBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto.getDefaultInstance()) return this;
        if (other.hasKeyValuePair()) {
          mergeKeyValuePair(other.getKeyValuePair());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasKeyValuePair()) {
          
          return false;
        }
        if (!getKeyValuePair().isInitialized()) {
          
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
      private org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto keyValuePair_ = org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.getDefaultInstance();
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder> keyValuePairBuilder_;
      /**
       * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
       */
      public boolean hasKeyValuePair() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto getKeyValuePair() {
        if (keyValuePairBuilder_ == null) {
          return keyValuePair_;
        } else {
          return keyValuePairBuilder_.getMessage();
        }
      }
      /**
       * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
       */
      public Builder setKeyValuePair(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto value) {
        if (keyValuePairBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          keyValuePair_ = value;
          onChanged();
        } else {
          keyValuePairBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
       */
      public Builder setKeyValuePair(
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder builderForValue) {
        if (keyValuePairBuilder_ == null) {
          keyValuePair_ = builderForValue.build();
          onChanged();
        } else {
          keyValuePairBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
       */
      public Builder mergeKeyValuePair(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto value) {
        if (keyValuePairBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              keyValuePair_ != org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.getDefaultInstance()) {
            keyValuePair_ =
              org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.newBuilder(keyValuePair_).mergeFrom(value).buildPartial();
          } else {
            keyValuePair_ = value;
          }
          onChanged();
        } else {
          keyValuePairBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
       */
      public Builder clearKeyValuePair() {
        if (keyValuePairBuilder_ == null) {
          keyValuePair_ = org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.getDefaultInstance();
          onChanged();
        } else {
          keyValuePairBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder getKeyValuePairBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getKeyValuePairFieldBuilder().getBuilder();
      }
      /**
       * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder getKeyValuePairOrBuilder() {
        if (keyValuePairBuilder_ != null) {
          return keyValuePairBuilder_.getMessageOrBuilder();
        } else {
          return keyValuePair_;
        }
      }
      /**
       * required .hadoop.hdfs.KeyValueProto keyValuePair = 1;
       */
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder> 
          getKeyValuePairFieldBuilder() {
        if (keyValuePairBuilder_ == null) {
          keyValuePairBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder>(
                  keyValuePair_,
                  getParentForChildren(),
                  isClean());
          keyValuePair_ = null;
        }
        return keyValuePairBuilder_;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.WriteRequestProto)
    }

    static {
      defaultInstance = new WriteRequestProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.WriteRequestProto)
  }
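
  // Editorial note (hand-written example, not protoc output): a minimal
  // sketch of how a caller might assemble the WriteRequestProto defined
  // above. The method name is illustrative only; the builder calls are the
  // ones generated for the message fields (setKeyValuePair on
  // WriteRequestProto.Builder, setKey/setValue on KeyValueProto.Builder).
  private static WriteRequestProto exampleWriteRequest(
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block,
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto location) {
    return WriteRequestProto.newBuilder()
        // The Builder overload of setKeyValuePair builds the nested message
        // eagerly before storing it.
        .setKeyValuePair(KeyValueProto.newBuilder()
            .setKey(block)
            .setValue(location))
        // build() throws UninitializedMessageException if the required
        // keyValuePair field is still unset.
        .build();
  }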

  public interface WriteResponseProtoOrBuilder
      extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.hdfs.WriteResponseProto}
   */
  public static final class WriteResponseProto extends
      io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
      implements WriteResponseProtoOrBuilder {
    // Use WriteResponseProto.newBuilder() to construct.
    private WriteResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private WriteResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final WriteResponseProto defaultInstance;
    public static WriteResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public WriteResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private WriteResponseProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_WriteResponseProto_descriptor;
    }

    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_WriteResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<WriteResponseProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<WriteResponseProto>() {
      public WriteResponseProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new WriteResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<WriteResponseProto> getParserForType() {
      return PARSER;
    }

    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto) obj;

      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto parseFrom(byte[] data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto parseFrom(
        byte[] data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto parseFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.WriteResponseProto}
     */
    public static final class Builder extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProtoOrBuilder {
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_WriteResponseProto_descriptor;
      }

      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_WriteResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_WriteResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto(this);
        onBuilt();
        return result;
      }

      public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        return true;
      }

      public Builder mergeFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.WriteResponseProto)
    }

    static {
      defaultInstance = new WriteResponseProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.WriteResponseProto)
  }
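
  // Editorial note (hand-written example, not protoc output): a sketch of the
  // serialization round trip for the empty WriteResponseProto above. With no
  // declared fields, a default instance serializes to zero bytes, and parsing
  // keeps any unrecognized wire data only in the UnknownFieldSet.
  private static WriteResponseProto exampleWriteResponseRoundTrip()
      throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
    byte[] wire = WriteResponseProto.getDefaultInstance().toByteArray(); // zero-length
    return WriteResponseProto.parseFrom(wire);
  }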

  public interface ReadRequestProtoOrBuilder
      extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.BlockProto key = 1;
    /**
     * required .hadoop.hdfs.BlockProto key = 1;
     */
    boolean hasKey();
    /**
     * required .hadoop.hdfs.BlockProto key = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getKey();
    /**
     * required .hadoop.hdfs.BlockProto key = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getKeyOrBuilder();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.ReadRequestProto}
   */
  public static final class ReadRequestProto extends
      io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
      implements ReadRequestProtoOrBuilder {
    // Use ReadRequestProto.newBuilder() to construct.
    private ReadRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private ReadRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final ReadRequestProto defaultInstance;
    public static ReadRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public ReadRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private ReadRequestProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = key_.toBuilder();
              }
              key_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(key_);
                key_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ReadRequestProto_descriptor;
    }

    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ReadRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<ReadRequestProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<ReadRequestProto>() {
      public ReadRequestProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new ReadRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<ReadRequestProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required .hadoop.hdfs.BlockProto key = 1;
    public static final int KEY_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto key_;
    /**
     * required .hadoop.hdfs.BlockProto key = 1;
     */
    public boolean hasKey() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * required .hadoop.hdfs.BlockProto key = 1;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getKey() {
      return key_;
    }
    /**
     * required .hadoop.hdfs.BlockProto key = 1;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getKeyOrBuilder() {
      return key_;
    }

    private void initFields() {
      key_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasKey()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getKey().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, key_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, key_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto) obj;

      boolean result = true;
      result = result && (hasKey() == other.hasKey());
      if (hasKey()) {
        result = result && getKey()
            .equals(other.getKey());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasKey()) {
        hash = (37 * hash) + KEY_FIELD_NUMBER;
        hash = (53 * hash) + getKey().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto parseFrom(byte[] data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto parseFrom(
        byte[] data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto parseFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ReadRequestProto}
     */
    public static final class Builder extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProtoOrBuilder {
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ReadRequestProto_descriptor;
      }

      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ReadRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getKeyFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        if (keyBuilder_ == null) {
          key_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
        } else {
          keyBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ReadRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (keyBuilder_ == null) {
          result.key_ = key_;
        } else {
          result.key_ = keyBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto.getDefaultInstance()) return this;
        if (other.hasKey()) {
          mergeKey(other.getKey());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasKey()) {
          
          return false;
        }
        if (!getKey().isInitialized()) {
          
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required .hadoop.hdfs.BlockProto key = 1;
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto key_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> keyBuilder_;
      /**
       * required .hadoop.hdfs.BlockProto key = 1;
       */
      public boolean hasKey() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * required .hadoop.hdfs.BlockProto key = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getKey() {
        if (keyBuilder_ == null) {
          return key_;
        } else {
          return keyBuilder_.getMessage();
        }
      }
      /**
       * required .hadoop.hdfs.BlockProto key = 1;
       */
      public Builder setKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (keyBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          key_ = value;
          onChanged();
        } else {
          keyBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * required .hadoop.hdfs.BlockProto key = 1;
       */
      public Builder setKey(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
        if (keyBuilder_ == null) {
          key_ = builderForValue.build();
          onChanged();
        } else {
          keyBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * required .hadoop.hdfs.BlockProto key = 1;
       */
      public Builder mergeKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (keyBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              key_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
            key_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(key_).mergeFrom(value).buildPartial();
          } else {
            key_ = value;
          }
          onChanged();
        } else {
          keyBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * required .hadoop.hdfs.BlockProto key = 1;
       */
      public Builder clearKey() {
        if (keyBuilder_ == null) {
          key_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
          onChanged();
        } else {
          keyBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * required .hadoop.hdfs.BlockProto key = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getKeyBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getKeyFieldBuilder().getBuilder();
      }
      /**
       * required .hadoop.hdfs.BlockProto key = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getKeyOrBuilder() {
        if (keyBuilder_ != null) {
          return keyBuilder_.getMessageOrBuilder();
        } else {
          return key_;
        }
      }
      /**
       * required .hadoop.hdfs.BlockProto key = 1;
       */
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
          getKeyFieldBuilder() {
        if (keyBuilder_ == null) {
          keyBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
                  key_,
                  getParentForChildren(),
                  isClean());
          key_ = null;
        }
        return keyBuilder_;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReadRequestProto)
    }

    static {
      defaultInstance = new ReadRequestProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ReadRequestProto)
  }
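
  // Editorial note (hand-written example, not protoc output): a sketch of the
  // required-field contract on the ReadRequestProto builder above. The
  // required BlockProto key leaves Builder.isInitialized() false until
  // setKey(...) runs; buildPartial() would still return a message, but
  // build() only succeeds once the key is present.
  private static ReadRequestProto exampleReadRequest(
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block) {
    ReadRequestProto.Builder builder = ReadRequestProto.newBuilder();
    // isInitialized() is false here: the required key has not been set.
    builder.setKey(block);
    return builder.build(); // safe now that the required field is populated
  }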

  public interface ReadResponseProtoOrBuilder
      extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {

    // optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
    /**
     * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
     */
    boolean hasValue();
    /**
     * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getValue();
    /**
     * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder getValueOrBuilder();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.ReadResponseProto}
   */
  public static final class ReadResponseProto extends
      io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
      implements ReadResponseProtoOrBuilder {
    // Use ReadResponseProto.newBuilder() to construct.
    private ReadResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private ReadResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final ReadResponseProto defaultInstance;
    public static ReadResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public ReadResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private ReadResponseProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = value_.toBuilder();
              }
              value_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(value_);
                value_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ReadResponseProto_descriptor;
    }

    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ReadResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<ReadResponseProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<ReadResponseProto>() {
      public ReadResponseProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new ReadResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<ReadResponseProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
    public static final int VALUE_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto value_;
    /**
     * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
     */
    public boolean hasValue() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getValue() {
      return value_;
    }
    /**
     * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder getValueOrBuilder() {
      return value_;
    }

    private void initFields() {
      value_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (hasValue()) {
        if (!getValue().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, value_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, value_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto) obj;

      boolean result = true;
      result = result && (hasValue() == other.hasValue());
      if (hasValue()) {
        result = result && getValue()
            .equals(other.getValue());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasValue()) {
        hash = (37 * hash) + VALUE_FIELD_NUMBER;
        hash = (53 * hash) + getValue().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto parseFrom(byte[] data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto parseFrom(
        byte[] data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto parseFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ReadResponseProto}
     */
    public static final class Builder extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProtoOrBuilder {
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ReadResponseProto_descriptor;
      }

      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ReadResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getValueFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        if (valueBuilder_ == null) {
          value_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance();
        } else {
          valueBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ReadResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (valueBuilder_ == null) {
          result.value_ = value_;
        } else {
          result.value_ = valueBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto.getDefaultInstance()) return this;
        if (other.hasValue()) {
          mergeValue(other.getValue());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (hasValue()) {
          if (!getValue().isInitialized()) {
            
            return false;
          }
        }
        return true;
      }

      public Builder mergeFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto value_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance();
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder> valueBuilder_;
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
       */
      public boolean hasValue() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getValue() {
        if (valueBuilder_ == null) {
          return value_;
        } else {
          return valueBuilder_.getMessage();
        }
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
       */
      public Builder setValue(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto value) {
        if (valueBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          value_ = value;
          onChanged();
        } else {
          valueBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
       */
      public Builder setValue(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder builderForValue) {
        if (valueBuilder_ == null) {
          value_ = builderForValue.build();
          onChanged();
        } else {
          valueBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
       */
      public Builder mergeValue(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto value) {
        if (valueBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              value_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance()) {
            value_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.newBuilder(value_).mergeFrom(value).buildPartial();
          } else {
            value_ = value;
          }
          onChanged();
        } else {
          valueBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
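
      // Note on merge semantics (editorial comment, not generated code):
      // mergeValue() above folds an incoming ProvidedStorageLocationProto into
      // any value already set, field by field, while setValue() replaces the
      // whole message. A minimal sketch, assuming locA/locB are pre-built
      // ProvidedStorageLocationProto instances:
      //
      //   ReadResponseProto.Builder b = ReadResponseProto.newBuilder();
      //   b.mergeValue(locA);  // no prior value: equivalent to setValue(locA)
      //   b.mergeValue(locB);  // fields present in locB overwrite those of locA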
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
       */
      public Builder clearValue() {
        if (valueBuilder_ == null) {
          value_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance();
          onChanged();
        } else {
          valueBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder getValueBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getValueFieldBuilder().getBuilder();
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder getValueOrBuilder() {
        if (valueBuilder_ != null) {
          return valueBuilder_.getMessageOrBuilder();
        } else {
          return value_;
        }
      }
      /**
       * optional .hadoop.hdfs.ProvidedStorageLocationProto value = 1;
       */
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder> 
          getValueFieldBuilder() {
        if (valueBuilder_ == null) {
          valueBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder>(
                  value_,
                  getParentForChildren(),
                  isClean());
          value_ = null;
        }
        return valueBuilder_;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReadResponseProto)
    }

    static {
      defaultInstance = new ReadResponseProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ReadResponseProto)
  }
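
  // Illustrative round-trip sketch (editorial, not generated code). The
  // ProvidedStorageLocationProto field names (path/offset/length/nonce) are
  // assumed from the companion HdfsProtos definition:
  //
  //   AliasMapProtocolProtos.ReadResponseProto resp =
  //       AliasMapProtocolProtos.ReadResponseProto.newBuilder()
  //           .setValue(HdfsProtos.ProvidedStorageLocationProto.newBuilder()
  //               .setPath("/provided/store/part-0000")
  //               .setOffset(0L)
  //               .setLength(134217728L)
  //               .setNonce(io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY)
  //               .build())
  //           .build();
  //   byte[] wire = resp.toByteArray();
  //   AliasMapProtocolProtos.ReadResponseProto parsed =
  //       AliasMapProtocolProtos.ReadResponseProto.parseFrom(wire);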

  public interface ListRequestProtoOrBuilder
      extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {

    // optional .hadoop.hdfs.BlockProto marker = 1;
    /**
     * optional .hadoop.hdfs.BlockProto marker = 1;
     */
    boolean hasMarker();
    /**
     * optional .hadoop.hdfs.BlockProto marker = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getMarker();
    /**
     * optional .hadoop.hdfs.BlockProto marker = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getMarkerOrBuilder();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.ListRequestProto}
   */
  public static final class ListRequestProto extends
      io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
      implements ListRequestProtoOrBuilder {
    // Use ListRequestProto.newBuilder() to construct.
    private ListRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private ListRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final ListRequestProto defaultInstance;
    public static ListRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public ListRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private ListRequestProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = marker_.toBuilder();
              }
              marker_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(marker_);
                marker_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ListRequestProto_descriptor;
    }

    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ListRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<ListRequestProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<ListRequestProto>() {
      public ListRequestProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new ListRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<ListRequestProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // optional .hadoop.hdfs.BlockProto marker = 1;
    public static final int MARKER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto marker_;
    /**
     * optional .hadoop.hdfs.BlockProto marker = 1;
     */
    public boolean hasMarker() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * optional .hadoop.hdfs.BlockProto marker = 1;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getMarker() {
      return marker_;
    }
    /**
     * optional .hadoop.hdfs.BlockProto marker = 1;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getMarkerOrBuilder() {
      return marker_;
    }

    private void initFields() {
      marker_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (hasMarker()) {
        if (!getMarker().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, marker_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, marker_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto) obj;

      boolean result = true;
      result = result && (hasMarker() == other.hasMarker());
      if (hasMarker()) {
        result = result && getMarker()
            .equals(other.getMarker());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasMarker()) {
        hash = (37 * hash) + MARKER_FIELD_NUMBER;
        hash = (53 * hash) + getMarker().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto parseFrom(byte[] data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto parseFrom(
        byte[] data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto parseFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
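
    // Editorial note: the parseFrom(...) overloads above consume the input as
    // a single message, whereas parseDelimitedFrom(...) first reads a varint
    // length prefix, so multiple messages can share one stream. Sketch
    // (out/in are hypothetical streams):
    //
    //   request.writeDelimitedTo(out);                        // length-prefixed
    //   ListRequestProto next = ListRequestProto.parseDelimitedFrom(in);
    //   // parseDelimitedFrom returns null once the stream is exhausted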

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ListRequestProto}
     */
    public static final class Builder extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProtoOrBuilder {
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ListRequestProto_descriptor;
      }

      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ListRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getMarkerFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        if (markerBuilder_ == null) {
          marker_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
        } else {
          markerBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ListRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (markerBuilder_ == null) {
          result.marker_ = marker_;
        } else {
          result.marker_ = markerBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto.getDefaultInstance()) return this;
        if (other.hasMarker()) {
          mergeMarker(other.getMarker());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (hasMarker()) {
          if (!getMarker().isInitialized()) {
            
            return false;
          }
        }
        return true;
      }

      public Builder mergeFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // optional .hadoop.hdfs.BlockProto marker = 1;
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto marker_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> markerBuilder_;
      /**
       * optional .hadoop.hdfs.BlockProto marker = 1;
       */
      public boolean hasMarker() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * optional .hadoop.hdfs.BlockProto marker = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getMarker() {
        if (markerBuilder_ == null) {
          return marker_;
        } else {
          return markerBuilder_.getMessage();
        }
      }
      /**
       * optional .hadoop.hdfs.BlockProto marker = 1;
       */
      public Builder setMarker(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (markerBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          marker_ = value;
          onChanged();
        } else {
          markerBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * optional .hadoop.hdfs.BlockProto marker = 1;
       */
      public Builder setMarker(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
        if (markerBuilder_ == null) {
          marker_ = builderForValue.build();
          onChanged();
        } else {
          markerBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * optional .hadoop.hdfs.BlockProto marker = 1;
       */
      public Builder mergeMarker(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (markerBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              marker_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
            marker_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(marker_).mergeFrom(value).buildPartial();
          } else {
            marker_ = value;
          }
          onChanged();
        } else {
          markerBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * optional .hadoop.hdfs.BlockProto marker = 1;
       */
      public Builder clearMarker() {
        if (markerBuilder_ == null) {
          marker_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
          onChanged();
        } else {
          markerBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * optional .hadoop.hdfs.BlockProto marker = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getMarkerBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getMarkerFieldBuilder().getBuilder();
      }
      /**
       * optional .hadoop.hdfs.BlockProto marker = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getMarkerOrBuilder() {
        if (markerBuilder_ != null) {
          return markerBuilder_.getMessageOrBuilder();
        } else {
          return marker_;
        }
      }
      /**
       * optional .hadoop.hdfs.BlockProto marker = 1;
       */
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
          getMarkerFieldBuilder() {
        if (markerBuilder_ == null) {
          markerBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
                  marker_,
                  getParentForChildren(),
                  isClean());
          marker_ = null;
        }
        return markerBuilder_;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ListRequestProto)
    }

    static {
      defaultInstance = new ListRequestProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ListRequestProto)
  }
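
  // Illustrative usage sketch (editorial, not generated code): the optional
  // marker carries the last block returned by a previous LIST call, so a
  // first request is built empty and follow-ups resume from a marker:
  //
  //   AliasMapProtocolProtos.ListRequestProto first =
  //       AliasMapProtocolProtos.ListRequestProto.newBuilder().build();
  //   AliasMapProtocolProtos.ListRequestProto next =
  //       AliasMapProtocolProtos.ListRequestProto.newBuilder()
  //           .setMarker(previousResponse.getNextMarker())  // previousResponse: a prior ListResponseProto
  //           .build();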

  public interface ListResponseProtoOrBuilder
      extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {

    // repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
    /**
     * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto>
        getFileRegionsList();
    /**
     * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto getFileRegions(int index);
    /**
     * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
     */
    int getFileRegionsCount();
    /**
     * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder>
        getFileRegionsOrBuilderList();
    /**
     * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder getFileRegionsOrBuilder(
        int index);

    // optional .hadoop.hdfs.BlockProto nextMarker = 2;
    /**
     * optional .hadoop.hdfs.BlockProto nextMarker = 2;
     */
    boolean hasNextMarker();
    /**
     * optional .hadoop.hdfs.BlockProto nextMarker = 2;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getNextMarker();
    /**
     * optional .hadoop.hdfs.BlockProto nextMarker = 2;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getNextMarkerOrBuilder();
  }
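
  // Illustrative pagination sketch (editorial, not generated code): a caller
  // drains the alias map by following nextMarker until it is absent.
  // client.list(...) stands in for the LIST RPC stub and process(...) for an
  // application sink; both are hypothetical:
  //
  //   ListRequestProto req = ListRequestProto.newBuilder().build();
  //   while (true) {
  //     ListResponseProto resp = client.list(req);
  //     for (KeyValueProto kv : resp.getFileRegionsList()) {
  //       process(kv.getKey(), kv.getValue());
  //     }
  //     if (!resp.hasNextMarker()) {
  //       break;
  //     }
  //     req = ListRequestProto.newBuilder()
  //         .setMarker(resp.getNextMarker()).build();
  //   }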
  /**
   * Protobuf type {@code hadoop.hdfs.ListResponseProto}
   */
  public static final class ListResponseProto extends
      io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
      implements ListResponseProtoOrBuilder {
    // Use ListResponseProto.newBuilder() to construct.
    private ListResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private ListResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final ListResponseProto defaultInstance;
    public static ListResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public ListResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private ListResponseProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
                fileRegions_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto>();
                mutable_bitField0_ |= 0x00000001;
              }
              fileRegions_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.PARSER, extensionRegistry));
              break;
            }
            case 18: {
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = nextMarker_.toBuilder();
              }
              nextMarker_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(nextMarker_);
                nextMarker_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
          fileRegions_ = java.util.Collections.unmodifiableList(fileRegions_);
        }
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ListResponseProto_descriptor;
    }

    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ListResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<ListResponseProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<ListResponseProto>() {
      public ListResponseProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new ListResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<ListResponseProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
    public static final int FILEREGIONS_FIELD_NUMBER = 1;
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto> fileRegions_;
    /**
     * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
     */
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto> getFileRegionsList() {
      return fileRegions_;
    }
    /**
     * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
     */
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder>
        getFileRegionsOrBuilderList() {
      return fileRegions_;
    }
    /**
     * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
     */
    public int getFileRegionsCount() {
      return fileRegions_.size();
    }
    /**
     * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
     */
    public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto getFileRegions(int index) {
      return fileRegions_.get(index);
    }
    /**
     * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
     */
    public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder getFileRegionsOrBuilder(
        int index) {
      return fileRegions_.get(index);
    }

    // optional .hadoop.hdfs.BlockProto nextMarker = 2;
    public static final int NEXTMARKER_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto nextMarker_;
    /**
     * optional .hadoop.hdfs.BlockProto nextMarker = 2;
     */
    public boolean hasNextMarker() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * optional .hadoop.hdfs.BlockProto nextMarker = 2;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getNextMarker() {
      return nextMarker_;
    }
    /**
     * optional .hadoop.hdfs.BlockProto nextMarker = 2;
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getNextMarkerOrBuilder() {
      return nextMarker_;
    }

    private void initFields() {
      fileRegions_ = java.util.Collections.emptyList();
      nextMarker_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      for (int i = 0; i < getFileRegionsCount(); i++) {
        if (!getFileRegions(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasNextMarker()) {
        if (!getNextMarker().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      for (int i = 0; i < fileRegions_.size(); i++) {
        output.writeMessage(1, fileRegions_.get(i));
      }
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(2, nextMarker_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < fileRegions_.size(); i++) {
        size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, fileRegions_.get(i));
      }
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, nextMarker_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto) obj;

      boolean result = true;
      result = result && getFileRegionsList()
          .equals(other.getFileRegionsList());
      result = result && (hasNextMarker() == other.hasNextMarker());
      if (hasNextMarker()) {
        result = result && getNextMarker()
            .equals(other.getNextMarker());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (getFileRegionsCount() > 0) {
        hash = (37 * hash) + FILEREGIONS_FIELD_NUMBER;
        hash = (53 * hash) + getFileRegionsList().hashCode();
      }
      if (hasNextMarker()) {
        hash = (37 * hash) + NEXTMARKER_FIELD_NUMBER;
        hash = (53 * hash) + getNextMarker().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto parseFrom(byte[] data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto parseFrom(
        byte[] data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto parseFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ListResponseProto}
     */
    public static final class Builder extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProtoOrBuilder {
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ListResponseProto_descriptor;
      }

      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ListResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getFileRegionsFieldBuilder();
          getNextMarkerFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        if (fileRegionsBuilder_ == null) {
          fileRegions_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
        } else {
          fileRegionsBuilder_.clear();
        }
        if (nextMarkerBuilder_ == null) {
          nextMarker_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
        } else {
          nextMarkerBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_ListResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (fileRegionsBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001)) {
            fileRegions_ = java.util.Collections.unmodifiableList(fileRegions_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.fileRegions_ = fileRegions_;
        } else {
          result.fileRegions_ = fileRegionsBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000001;
        }
        if (nextMarkerBuilder_ == null) {
          result.nextMarker_ = nextMarker_;
        } else {
          result.nextMarker_ = nextMarkerBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto.getDefaultInstance()) return this;
        if (fileRegionsBuilder_ == null) {
          if (!other.fileRegions_.isEmpty()) {
            if (fileRegions_.isEmpty()) {
              fileRegions_ = other.fileRegions_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureFileRegionsIsMutable();
              fileRegions_.addAll(other.fileRegions_);
            }
            onChanged();
          }
        } else {
          if (!other.fileRegions_.isEmpty()) {
            if (fileRegionsBuilder_.isEmpty()) {
              fileRegionsBuilder_.dispose();
              fileRegionsBuilder_ = null;
              fileRegions_ = other.fileRegions_;
              bitField0_ = (bitField0_ & ~0x00000001);
              fileRegionsBuilder_ = 
                io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
                   getFileRegionsFieldBuilder() : null;
            } else {
              fileRegionsBuilder_.addAllMessages(other.fileRegions_);
            }
          }
        }
        if (other.hasNextMarker()) {
          mergeNextMarker(other.getNextMarker());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
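
      // Editorial note: for the repeated fileRegions field, mergeFrom(other)
      // appends other's entries after any already in this builder (it never
      // replaces or de-duplicates), while the optional nextMarker sub-message
      // is merged field by field via mergeNextMarker().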

      public final boolean isInitialized() {
        for (int i = 0; i < getFileRegionsCount(); i++) {
          if (!getFileRegions(i).isInitialized()) {
            
            return false;
          }
        }
        if (hasNextMarker()) {
          if (!getNextMarker().isInitialized()) {
            
            return false;
          }
        }
        return true;
      }

      public Builder mergeFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto> fileRegions_ =
        java.util.Collections.emptyList();
      private void ensureFileRegionsIsMutable() {
        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
          fileRegions_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto>(fileRegions_);
          bitField0_ |= 0x00000001;
         }
      }
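
      // Editorial note: the builder initially aliases an immutable list
      // (possibly shared with a parsed message) and only copies it into a
      // fresh ArrayList on the first mutation; bit 0x00000001 of bitField0_
      // records that this private copy exists.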

      private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder> fileRegionsBuilder_;

      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto> getFileRegionsList() {
        if (fileRegionsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(fileRegions_);
        } else {
          return fileRegionsBuilder_.getMessageList();
        }
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public int getFileRegionsCount() {
        if (fileRegionsBuilder_ == null) {
          return fileRegions_.size();
        } else {
          return fileRegionsBuilder_.getCount();
        }
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto getFileRegions(int index) {
        if (fileRegionsBuilder_ == null) {
          return fileRegions_.get(index);
        } else {
          return fileRegionsBuilder_.getMessage(index);
        }
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public Builder setFileRegions(
          int index, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto value) {
        if (fileRegionsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureFileRegionsIsMutable();
          fileRegions_.set(index, value);
          onChanged();
        } else {
          fileRegionsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public Builder setFileRegions(
          int index, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder builderForValue) {
        if (fileRegionsBuilder_ == null) {
          ensureFileRegionsIsMutable();
          fileRegions_.set(index, builderForValue.build());
          onChanged();
        } else {
          fileRegionsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public Builder addFileRegions(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto value) {
        if (fileRegionsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureFileRegionsIsMutable();
          fileRegions_.add(value);
          onChanged();
        } else {
          fileRegionsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public Builder addFileRegions(
          int index, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto value) {
        if (fileRegionsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureFileRegionsIsMutable();
          fileRegions_.add(index, value);
          onChanged();
        } else {
          fileRegionsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public Builder addFileRegions(
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder builderForValue) {
        if (fileRegionsBuilder_ == null) {
          ensureFileRegionsIsMutable();
          fileRegions_.add(builderForValue.build());
          onChanged();
        } else {
          fileRegionsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public Builder addFileRegions(
          int index, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder builderForValue) {
        if (fileRegionsBuilder_ == null) {
          ensureFileRegionsIsMutable();
          fileRegions_.add(index, builderForValue.build());
          onChanged();
        } else {
          fileRegionsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public Builder addAllFileRegions(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto> values) {
        if (fileRegionsBuilder_ == null) {
          ensureFileRegionsIsMutable();
          super.addAll(values, fileRegions_);
          onChanged();
        } else {
          fileRegionsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public Builder clearFileRegions() {
        if (fileRegionsBuilder_ == null) {
          fileRegions_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          fileRegionsBuilder_.clear();
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public Builder removeFileRegions(int index) {
        if (fileRegionsBuilder_ == null) {
          ensureFileRegionsIsMutable();
          fileRegions_.remove(index);
          onChanged();
        } else {
          fileRegionsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder getFileRegionsBuilder(
          int index) {
        return getFileRegionsFieldBuilder().getBuilder(index);
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder getFileRegionsOrBuilder(
          int index) {
        if (fileRegionsBuilder_ == null) {
          return fileRegions_.get(index);
        } else {
          return fileRegionsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder>
           getFileRegionsOrBuilderList() {
        if (fileRegionsBuilder_ != null) {
          return fileRegionsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(fileRegions_);
        }
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder addFileRegionsBuilder() {
        return getFileRegionsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.getDefaultInstance());
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder addFileRegionsBuilder(
          int index) {
        return getFileRegionsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.getDefaultInstance());
      }
      /**
       * repeated .hadoop.hdfs.KeyValueProto fileRegions = 1;
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder>
           getFileRegionsBuilderList() {
        return getFileRegionsFieldBuilder().getBuilderList();
      }
      private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder> 
          getFileRegionsFieldBuilder() {
        if (fileRegionsBuilder_ == null) {
          fileRegionsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.KeyValueProtoOrBuilder>(
                  fileRegions_,
                  ((bitField0_ & 0x00000001) == 0x00000001),
                  getParentForChildren(),
                  isClean());
          fileRegions_ = null;
        }
        return fileRegionsBuilder_;
      }

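      // Usage sketch (not part of the generated code): filling the repeated
      // fileRegions field through this builder. The backing list is copied
      // on first mutation (ensureFileRegionsIsMutable), so previously built
      // messages are never aliased by later edits. kvProto and blockProto
      // below are placeholder values assumed to be built elsewhere.
      //
      //   ListResponseProto.Builder b = ListResponseProto.newBuilder();
      //   b.addFileRegions(kvProto);        // append a finished message
      //   b.addFileRegionsBuilder()         // or build one in place
      //       .setKey(blockProto);
      //   ListResponseProto resp = b.build();
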
      // optional .hadoop.hdfs.BlockProto nextMarker = 2;
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto nextMarker_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> nextMarkerBuilder_;
      /**
       * optional .hadoop.hdfs.BlockProto nextMarker = 2;
       */
      public boolean hasNextMarker() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * optional .hadoop.hdfs.BlockProto nextMarker = 2;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getNextMarker() {
        if (nextMarkerBuilder_ == null) {
          return nextMarker_;
        } else {
          return nextMarkerBuilder_.getMessage();
        }
      }
      /**
       * optional .hadoop.hdfs.BlockProto nextMarker = 2;
       */
      public Builder setNextMarker(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (nextMarkerBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          nextMarker_ = value;
          onChanged();
        } else {
          nextMarkerBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * optional .hadoop.hdfs.BlockProto nextMarker = 2;
       */
      public Builder setNextMarker(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
        if (nextMarkerBuilder_ == null) {
          nextMarker_ = builderForValue.build();
          onChanged();
        } else {
          nextMarkerBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * optional .hadoop.hdfs.BlockProto nextMarker = 2;
       */
      public Builder mergeNextMarker(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (nextMarkerBuilder_ == null) {
          if (((bitField0_ & 0x00000002) == 0x00000002) &&
              nextMarker_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
            nextMarker_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(nextMarker_).mergeFrom(value).buildPartial();
          } else {
            nextMarker_ = value;
          }
          onChanged();
        } else {
          nextMarkerBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * optional .hadoop.hdfs.BlockProto nextMarker = 2;
       */
      public Builder clearNextMarker() {
        if (nextMarkerBuilder_ == null) {
          nextMarker_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
          onChanged();
        } else {
          nextMarkerBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      /**
       * optional .hadoop.hdfs.BlockProto nextMarker = 2;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getNextMarkerBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getNextMarkerFieldBuilder().getBuilder();
      }
      /**
       * optional .hadoop.hdfs.BlockProto nextMarker = 2;
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getNextMarkerOrBuilder() {
        if (nextMarkerBuilder_ != null) {
          return nextMarkerBuilder_.getMessageOrBuilder();
        } else {
          return nextMarker_;
        }
      }
      /**
       * optional .hadoop.hdfs.BlockProto nextMarker = 2;
       */
      private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
          getNextMarkerFieldBuilder() {
        if (nextMarkerBuilder_ == null) {
          nextMarkerBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
                  nextMarker_,
                  getParentForChildren(),
                  isClean());
          nextMarker_ = null;
        }
        return nextMarkerBuilder_;
      }

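      // Merge sketch (not part of the generated code): mergeNextMarker()
      // combines an incoming BlockProto with an already-set nextMarker field
      // by field, replacing it outright only when the current value is still
      // the default instance. blockA and blockB are placeholder BlockProtos.
      //
      //   ListResponseProto.Builder b = ListResponseProto.newBuilder();
      //   b.setNextMarker(blockA);
      //   b.mergeNextMarker(blockB);   // fields set in blockB win; fields
      //                                // unset in blockB keep blockA's values
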
      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ListResponseProto)
    }

    static {
      defaultInstance = new ListResponseProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ListResponseProto)
  }

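  // Pagination sketch (not part of the generated code): a client can walk
  // the alias map by re-issuing list() until a response arrives without a
  // nextMarker. "stub" is a placeholder for whatever client-side binding of
  // AliasMapProtocolService the application holds, and the marker field on
  // ListRequestProto is an assumption about the surrounding protocol.
  //
  //   ListRequestProto.Builder req = ListRequestProto.newBuilder();
  //   while (true) {
  //     ListResponseProto resp = stub.list(req.build());
  //     for (KeyValueProto kv : resp.getFileRegionsList()) {
  //       // consume kv.getKey() / kv.getValue()
  //     }
  //     if (!resp.hasNextMarker()) {
  //       break;                              // last page reached
  //     }
  //     req.setMarker(resp.getNextMarker());  // resume after this block
  //   }
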
  public interface BlockPoolRequestProtoOrBuilder
      extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.hdfs.BlockPoolRequestProto}
   */
  public static final class BlockPoolRequestProto extends
      io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
      implements BlockPoolRequestProtoOrBuilder {
    // Use BlockPoolRequestProto.newBuilder() to construct.
    private BlockPoolRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private BlockPoolRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final BlockPoolRequestProto defaultInstance;
    public static BlockPoolRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public BlockPoolRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private BlockPoolRequestProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_BlockPoolRequestProto_descriptor;
    }

    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_BlockPoolRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<BlockPoolRequestProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<BlockPoolRequestProto>() {
      public BlockPoolRequestProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new BlockPoolRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<BlockPoolRequestProto> getParserForType() {
      return PARSER;
    }

    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto) obj;

      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto parseFrom(byte[] data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto parseFrom(
        byte[] data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto parseFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.BlockPoolRequestProto}
     */
    public static final class Builder extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProtoOrBuilder {
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_BlockPoolRequestProto_descriptor;
      }

      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_BlockPoolRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_BlockPoolRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto(this);
        onBuilt();
        return result;
      }

      public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        return true;
      }

      public Builder mergeFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockPoolRequestProto)
    }

    static {
      defaultInstance = new BlockPoolRequestProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockPoolRequestProto)
  }

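  // Usage sketch (not part of the generated code): BlockPoolRequestProto
  // declares no fields, so a freshly built instance serializes to zero
  // bytes; only unknown fields merged in from another message would add any.
  //
  //   BlockPoolRequestProto req = BlockPoolRequestProto.newBuilder().build();
  //   byte[] wire = req.toByteArray();                 // empty array
  //   BlockPoolRequestProto again = BlockPoolRequestProto.parseFrom(wire);
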
  public interface BlockPoolResponseProtoOrBuilder
      extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {

    // required string blockPoolId = 1;
    /**
     * required string blockPoolId = 1;
     */
    boolean hasBlockPoolId();
    /**
     * required string blockPoolId = 1;
     */
    java.lang.String getBlockPoolId();
    /**
     * required string blockPoolId = 1;
     */
    io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
        getBlockPoolIdBytes();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.BlockPoolResponseProto}
   */
  public static final class BlockPoolResponseProto extends
      io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
      implements BlockPoolResponseProtoOrBuilder {
    // Use BlockPoolResponseProto.newBuilder() to construct.
    private BlockPoolResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private BlockPoolResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final BlockPoolResponseProto defaultInstance;
    public static BlockPoolResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public BlockPoolResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private BlockPoolResponseProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              bitField0_ |= 0x00000001;
              blockPoolId_ = input.readBytes();
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_BlockPoolResponseProto_descriptor;
    }

    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_BlockPoolResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<BlockPoolResponseProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<BlockPoolResponseProto>() {
      public BlockPoolResponseProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new BlockPoolResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<BlockPoolResponseProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required string blockPoolId = 1;
    public static final int BLOCKPOOLID_FIELD_NUMBER = 1;
    private java.lang.Object blockPoolId_;
    /**
     * required string blockPoolId = 1;
     */
    public boolean hasBlockPoolId() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * required string blockPoolId = 1;
     */
    public java.lang.String getBlockPoolId() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = 
            (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          blockPoolId_ = s;
        }
        return s;
      }
    }
    /**
     * required string blockPoolId = 1;
     */
    public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
        getBlockPoolIdBytes() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = 
            io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        blockPoolId_ = b;
        return b;
      } else {
        return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
      }
    }

    private void initFields() {
      blockPoolId_ = "";
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasBlockPoolId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeBytes(1, getBlockPoolIdBytes());
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
          .computeBytesSize(1, getBlockPoolIdBytes());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto) obj;

      boolean result = true;
      result = result && (hasBlockPoolId() == other.hasBlockPoolId());
      if (hasBlockPoolId()) {
        result = result && getBlockPoolId()
            .equals(other.getBlockPoolId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasBlockPoolId()) {
        hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
        hash = (53 * hash) + getBlockPoolId().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto parseFrom(byte[] data)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto parseFrom(
        byte[] data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto parseFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.BlockPoolResponseProto}
     */
    public static final class Builder extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProtoOrBuilder {
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_BlockPoolResponseProto_descriptor;
      }

      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_BlockPoolResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        blockPoolId_ = "";
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.internal_static_hadoop_hdfs_BlockPoolResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.blockPoolId_ = blockPoolId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.getDefaultInstance()) return this;
        if (other.hasBlockPoolId()) {
          bitField0_ |= 0x00000001;
          blockPoolId_ = other.blockPoolId_;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasBlockPoolId()) {
          
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required string blockPoolId = 1;
      private java.lang.Object blockPoolId_ = "";
      /**
       * required string blockPoolId = 1;
       */
      public boolean hasBlockPoolId() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * required string blockPoolId = 1;
       */
      public java.lang.String getBlockPoolId() {
        java.lang.Object ref = blockPoolId_;
        if (!(ref instanceof java.lang.String)) {
          java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          blockPoolId_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string blockPoolId = 1;
       */
      public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
          getBlockPoolIdBytes() {
        java.lang.Object ref = blockPoolId_;
        if (ref instanceof String) {
          io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = 
              io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          blockPoolId_ = b;
          return b;
        } else {
          return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
        }
      }
      /**
       * required string blockPoolId = 1;
       */
      public Builder setBlockPoolId(
          java.lang.String value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        blockPoolId_ = value;
        onChanged();
        return this;
      }
      /**
       * required string blockPoolId = 1;
       */
      public Builder clearBlockPoolId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        blockPoolId_ = getDefaultInstance().getBlockPoolId();
        onChanged();
        return this;
      }
      /**
       * required string blockPoolId = 1;
       */
      public Builder setBlockPoolIdBytes(
          io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        blockPoolId_ = value;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockPoolResponseProto)
    }

    static {
      defaultInstance = new BlockPoolResponseProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockPoolResponseProto)
  }

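  // Required-field sketch (not part of the generated code): blockPoolId is
  // a required proto2 field, so build() throws an
  // UninitializedMessageException while the field is unset; buildPartial()
  // skips that check. The id string below is a placeholder.
  //
  //   BlockPoolResponseProto ok = BlockPoolResponseProto.newBuilder()
  //       .setBlockPoolId("BP-0000000000-example")
  //       .build();
  //   BlockPoolResponseProto.newBuilder().build();   // throws: blockPoolId
  //                                                  // is not set
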
  /**
   * Protobuf service {@code hadoop.hdfs.AliasMapProtocolService}
   */
  public static abstract class AliasMapProtocolService
      implements io.prestosql.hadoop.$internal.com.google.protobuf.Service {
    protected AliasMapProtocolService() {}

    public interface Interface {
      /**
       * rpc write(.hadoop.hdfs.WriteRequestProto) returns (.hadoop.hdfs.WriteResponseProto);
       */
      public abstract void write(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto request,
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto> done);

      /**
       * rpc read(.hadoop.hdfs.ReadRequestProto) returns (.hadoop.hdfs.ReadResponseProto);
       */
      public abstract void read(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto request,
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto> done);

      /**
       * rpc list(.hadoop.hdfs.ListRequestProto) returns (.hadoop.hdfs.ListResponseProto);
       */
      public abstract void list(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto request,
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto> done);

      /**
       * rpc getBlockPoolId(.hadoop.hdfs.BlockPoolRequestProto) returns (.hadoop.hdfs.BlockPoolResponseProto);
       */
      public abstract void getBlockPoolId(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto request,
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto> done);

    }

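    // Implementation sketch (not part of the generated code): a server
    // provides the four RPCs by implementing Interface and wrapping it with
    // newReflectiveService(). The default-instance responses below are
    // placeholders; type names are abbreviated for readability.
    //
    //   AliasMapProtocolService.Interface impl =
    //       new AliasMapProtocolService.Interface() {
    //         public void write(RpcController c, WriteRequestProto req,
    //             RpcCallback<WriteResponseProto> done) {
    //           done.run(WriteResponseProto.getDefaultInstance());
    //         }
    //         // read(), list() and getBlockPoolId() are analogous ...
    //       };
    //   Service service = AliasMapProtocolService.newReflectiveService(impl);
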
    public static io.prestosql.hadoop.$internal.com.google.protobuf.Service newReflectiveService(
        final Interface impl) {
      return new AliasMapProtocolService() {
        @java.lang.Override
        public  void write(
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto request,
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto> done) {
          impl.write(controller, request, done);
        }

        @java.lang.Override
        public  void read(
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto request,
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto> done) {
          impl.read(controller, request, done);
        }

        @java.lang.Override
        public  void list(
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto request,
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto> done) {
          impl.list(controller, request, done);
        }

        @java.lang.Override
        public  void getBlockPoolId(
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto request,
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto> done) {
          impl.getBlockPoolId(controller, request, done);
        }

      };
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.BlockingService
        newReflectiveBlockingService(final BlockingInterface impl) {
      return new io.prestosql.hadoop.$internal.com.google.protobuf.BlockingService() {
        public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.ServiceDescriptor
            getDescriptorForType() {
          return getDescriptor();
        }

        public final io.prestosql.hadoop.$internal.com.google.protobuf.Message callBlockingMethod(
            io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.MethodDescriptor method,
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
            io.prestosql.hadoop.$internal.com.google.protobuf.Message request)
            throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.callBlockingMethod() given method descriptor for " +
              "wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return impl.write(controller, (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto)request);
            case 1:
              return impl.read(controller, (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto)request);
            case 2:
              return impl.list(controller, (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto)request);
            case 3:
              return impl.getBlockPoolId(controller, (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto)request);
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

        public final io.prestosql.hadoop.$internal.com.google.protobuf.Message
            getRequestPrototype(
            io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.MethodDescriptor method) {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.getRequestPrototype() given method " +
              "descriptor for wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto.getDefaultInstance();
            case 1:
              return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto.getDefaultInstance();
            case 2:
              return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto.getDefaultInstance();
            case 3:
              return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

        public final io.prestosql.hadoop.$internal.com.google.protobuf.Message
            getResponsePrototype(
            io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.MethodDescriptor method) {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.getResponsePrototype() given method " +
              "descriptor for wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto.getDefaultInstance();
            case 1:
              return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto.getDefaultInstance();
            case 2:
              return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto.getDefaultInstance();
            case 3:
              return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

      };
    }

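    // Wiring sketch (not part of the generated code): the blocking variant
    // adapts a synchronous BlockingInterface implementation into a
    // BlockingService that an RPC engine dispatches by method index, exactly
    // as callBlockingMethod() above does. blockingImpl is a placeholder.
    //
    //   BlockingService bs =
    //       AliasMapProtocolService.newReflectiveBlockingService(blockingImpl);
    //   // an RPC server registers bs and routes each incoming call through
    //   // bs.callBlockingMethod(method, controller, request)
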
    /**
     * rpc write(.hadoop.hdfs.WriteRequestProto) returns (.hadoop.hdfs.WriteResponseProto);
     */
    public abstract void write(
        io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto request,
        io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto> done);

    /**
     * rpc read(.hadoop.hdfs.ReadRequestProto) returns (.hadoop.hdfs.ReadResponseProto);
     */
    public abstract void read(
        io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto request,
        io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto> done);

    /**
     * rpc list(.hadoop.hdfs.ListRequestProto) returns (.hadoop.hdfs.ListResponseProto);
     */
    public abstract void list(
        io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto request,
        io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto> done);

    /**
     * rpc getBlockPoolId(.hadoop.hdfs.BlockPoolRequestProto) returns (.hadoop.hdfs.BlockPoolResponseProto);
     */
    public abstract void getBlockPoolId(
        io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto request,
        io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto> done);

    public static final
        io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.ServiceDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.getDescriptor().getServices().get(0);
    }
    public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.ServiceDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }

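    // Generic dispatch entry point used by the RPC layer: callMethod routes
    // an incoming request to the matching typed method above, keyed by the
    // method's index within the service descriptor, and narrows the untyped
    // callback with RpcUtil.specializeCallback.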
    public final void callMethod(
        io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.MethodDescriptor method,
        io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
        io.prestosql.hadoop.$internal.com.google.protobuf.Message request,
        io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<
          io.prestosql.hadoop.$internal.com.google.protobuf.Message> done) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.callMethod() given method descriptor for wrong " +
          "service type.");
      }
      switch(method.getIndex()) {
        case 0:
          this.write(controller, (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto)request,
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback(
              done));
          return;
        case 1:
          this.read(controller, (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto)request,
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback(
              done));
          return;
        case 2:
          this.list(controller, (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto)request,
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback(
              done));
          return;
        case 3:
          this.getBlockPoolId(controller, (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto)request,
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback(
              done));
          return;
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }

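    // getRequestPrototype/getResponsePrototype hand the transport an empty
    // message of the correct type so that incoming bytes can be parsed into
    // the right request class before callMethod is invoked.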
    public final io.prestosql.hadoop.$internal.com.google.protobuf.Message
        getRequestPrototype(
        io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getRequestPrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto.getDefaultInstance();
        case 1:
          return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto.getDefaultInstance();
        case 2:
          return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto.getDefaultInstance();
        case 3:
          return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto.getDefaultInstance();
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }

    public final io.prestosql.hadoop.$internal.com.google.protobuf.Message
        getResponsePrototype(
        io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getResponsePrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto.getDefaultInstance();
        case 1:
          return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto.getDefaultInstance();
        case 2:
          return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto.getDefaultInstance();
        case 3:
          return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.getDefaultInstance();
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }

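    // Client side, asynchronous: Stub wraps an RpcChannel and forwards each
    // call to channel.callMethod with the matching method descriptor. A
    // minimal usage sketch (illustrative only; "channel" and "controller"
    // must come from the surrounding RPC framework):
    //
    //   AliasMapProtocolService stub = AliasMapProtocolService.newStub(channel);
    //   stub.read(controller, readRequest,
    //       response -> { /* handle the ReadResponseProto */ });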
    public static Stub newStub(
        io.prestosql.hadoop.$internal.com.google.protobuf.RpcChannel channel) {
      return new Stub(channel);
    }

    public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.AliasMapProtocolService implements Interface {
      private Stub(io.prestosql.hadoop.$internal.com.google.protobuf.RpcChannel channel) {
        this.channel = channel;
      }

      private final io.prestosql.hadoop.$internal.com.google.protobuf.RpcChannel channel;

      public io.prestosql.hadoop.$internal.com.google.protobuf.RpcChannel getChannel() {
        return channel;
      }

      public  void write(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto request,
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto.getDefaultInstance(),
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto.getDefaultInstance()));
      }

      public  void read(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto request,
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(1),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto.getDefaultInstance(),
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto.getDefaultInstance()));
      }

      public  void list(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto request,
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(2),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto.getDefaultInstance(),
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto.getDefaultInstance()));
      }

      public  void getBlockPoolId(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto request,
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(3),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.getDefaultInstance(),
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.getDefaultInstance()));
      }
    }

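    // Client side, synchronous: BlockingStub issues channel.callBlockingMethod
    // and returns the typed response directly, surfacing failures as
    // ServiceException. Sketch (illustrative only; "blockingChannel" and
    // "controller" come from the surrounding RPC framework):
    //
    //   AliasMapProtocolService.BlockingInterface client =
    //       AliasMapProtocolService.newBlockingStub(blockingChannel);
    //   String blockPoolId = client.getBlockPoolId(
    //       controller, BlockPoolRequestProto.getDefaultInstance()).getBlockPoolId();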
    public static BlockingInterface newBlockingStub(
        io.prestosql.hadoop.$internal.com.google.protobuf.BlockingRpcChannel channel) {
      return new BlockingStub(channel);
    }

    public interface BlockingInterface {
      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto write(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto read(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto list(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto getBlockPoolId(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;
    }

    private static final class BlockingStub implements BlockingInterface {
      private BlockingStub(io.prestosql.hadoop.$internal.com.google.protobuf.BlockingRpcChannel channel) {
        this.channel = channel;
      }

      private final io.prestosql.hadoop.$internal.com.google.protobuf.BlockingRpcChannel channel;

      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto write(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto read(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(1),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto list(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(2),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto getBlockPoolId(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(3),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.getDefaultInstance());
      }

    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.AliasMapProtocolService)
  }

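  // Descriptor plumbing: one Descriptor/FieldAccessorTable pair per message
  // type, filled in by the static initializer below and used by the generated
  // builders for reflective field access.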
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_KeyValueProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_KeyValueProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_WriteRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_WriteRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_WriteResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_WriteResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ReadRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_ReadRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ReadResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_ReadResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ListRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_ListRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ListResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_ListResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_BlockPoolRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_BlockPoolRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_BlockPoolResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_BlockPoolResponseProto_fieldAccessorTable;

  public static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor
      descriptor;
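  // The string array below is the serialized FileDescriptorProto of
  // AliasMapProtocol.proto (octal-escaped). The initializer rebuilds the live
  // descriptors from it, resolving the import of hdfs.proto against
  // HdfsProtos, and the assigner wires up the per-message accessor tables.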
  static {
    java.lang.String[] descriptorData = {
      "\n\026AliasMapProtocol.proto\022\013hadoop.hdfs\032\nh" +
      "dfs.proto\"o\n\rKeyValueProto\022$\n\003key\030\001 \001(\0132" +
      "\027.hadoop.hdfs.BlockProto\0228\n\005value\030\002 \001(\0132" +
      ").hadoop.hdfs.ProvidedStorageLocationPro" +
      "to\"E\n\021WriteRequestProto\0220\n\014keyValuePair\030" +
      "\001 \002(\0132\032.hadoop.hdfs.KeyValueProto\"\024\n\022Wri" +
      "teResponseProto\"8\n\020ReadRequestProto\022$\n\003k" +
      "ey\030\001 \002(\0132\027.hadoop.hdfs.BlockProto\"M\n\021Rea" +
      "dResponseProto\0228\n\005value\030\001 \001(\0132).hadoop.h" +
      "dfs.ProvidedStorageLocationProto\";\n\020List",
      "RequestProto\022\'\n\006marker\030\001 \001(\0132\027.hadoop.hd" +
      "fs.BlockProto\"q\n\021ListResponseProto\022/\n\013fi" +
      "leRegions\030\001 \003(\0132\032.hadoop.hdfs.KeyValuePr" +
      "oto\022+\n\nnextMarker\030\002 \001(\0132\027.hadoop.hdfs.Bl" +
      "ockProto\"\027\n\025BlockPoolRequestProto\"-\n\026Blo" +
      "ckPoolResponseProto\022\023\n\013blockPoolId\030\001 \002(\t" +
      "2\314\002\n\027AliasMapProtocolService\022H\n\005write\022\036." +
      "hadoop.hdfs.WriteRequestProto\032\037.hadoop.h" +
      "dfs.WriteResponseProto\022E\n\004read\022\035.hadoop." +
      "hdfs.ReadRequestProto\032\036.hadoop.hdfs.Read",
      "ResponseProto\022E\n\004list\022\035.hadoop.hdfs.List" +
      "RequestProto\032\036.hadoop.hdfs.ListResponseP" +
      "roto\022Y\n\016getBlockPoolId\022\".hadoop.hdfs.Blo" +
      "ckPoolRequestProto\032#.hadoop.hdfs.BlockPo" +
      "olResponseProtoBE\n%org.apache.hadoop.hdf" +
      "s.protocol.protoB\026AliasMapProtocolProtos" +
      "\210\001\001\240\001\001"
    };
    io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
      new io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
        public io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistry assignDescriptors(
            io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor root) {
          descriptor = root;
          internal_static_hadoop_hdfs_KeyValueProto_descriptor =
            getDescriptor().getMessageTypes().get(0);
          internal_static_hadoop_hdfs_KeyValueProto_fieldAccessorTable = new
            io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_KeyValueProto_descriptor,
              new java.lang.String[] { "Key", "Value", });
          internal_static_hadoop_hdfs_WriteRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(1);
          internal_static_hadoop_hdfs_WriteRequestProto_fieldAccessorTable = new
            io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_WriteRequestProto_descriptor,
              new java.lang.String[] { "KeyValuePair", });
          internal_static_hadoop_hdfs_WriteResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(2);
          internal_static_hadoop_hdfs_WriteResponseProto_fieldAccessorTable = new
            io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_WriteResponseProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_ReadRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(3);
          internal_static_hadoop_hdfs_ReadRequestProto_fieldAccessorTable = new
            io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_ReadRequestProto_descriptor,
              new java.lang.String[] { "Key", });
          internal_static_hadoop_hdfs_ReadResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(4);
          internal_static_hadoop_hdfs_ReadResponseProto_fieldAccessorTable = new
            io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_ReadResponseProto_descriptor,
              new java.lang.String[] { "Value", });
          internal_static_hadoop_hdfs_ListRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(5);
          internal_static_hadoop_hdfs_ListRequestProto_fieldAccessorTable = new
            io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_ListRequestProto_descriptor,
              new java.lang.String[] { "Marker", });
          internal_static_hadoop_hdfs_ListResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(6);
          internal_static_hadoop_hdfs_ListResponseProto_fieldAccessorTable = new
            io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_ListResponseProto_descriptor,
              new java.lang.String[] { "FileRegions", "NextMarker", });
          internal_static_hadoop_hdfs_BlockPoolRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(7);
          internal_static_hadoop_hdfs_BlockPoolRequestProto_fieldAccessorTable = new
            io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_BlockPoolRequestProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_BlockPoolResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(8);
          internal_static_hadoop_hdfs_BlockPoolResponseProto_fieldAccessorTable = new
            io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_BlockPoolResponseProto_descriptor,
              new java.lang.String[] { "BlockPoolId", });
          return null;
        }
      };
    io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor[] {
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
        }, assigner);
  }

  // @@protoc_insertion_point(outer_class_scope)
}